Import Cobalt 13.93830
diff --git a/src/cobalt/base/tokens.h b/src/cobalt/base/tokens.h
index 9473e36..891c4a6 100644
--- a/src/cobalt/base/tokens.h
+++ b/src/cobalt/base/tokens.h
@@ -43,6 +43,7 @@
MacroOpWithNameOnly(animationend) \
MacroOpWithNameOnly(assertive) \
MacroOpWithNameOnly(attributes) \
+ MacroOpWithNameOnly(beforeunload) \
MacroOpWithNameOnly(blur) \
MacroOpWithNameOnly(boundary) \
MacroOpWithNameOnly(canplay) \
diff --git a/src/cobalt/browser/application.cc b/src/cobalt/browser/application.cc
index 9f855ca..2ac5506 100644
--- a/src/cobalt/browser/application.cc
+++ b/src/cobalt/browser/application.cc
@@ -93,16 +93,16 @@
#endif // ENABLE_REMOTE_DEBUGGING
#if defined(ENABLE_WEBDRIVER)
-#if defined(ENABLE_DEBUG_COMMAND_LINE_SWITCHES)
int GetWebDriverPort() {
// The default port on which the webdriver server should listen for incoming
// connections.
#if defined(SB_OVERRIDE_DEFAULT_WEBDRIVER_PORT)
const int kDefaultWebDriverPort = SB_OVERRIDE_DEFAULT_WEBDRIVER_PORT;
#else
- const int kDefaultWebDriverPort = 9515;
+ const int kDefaultWebDriverPort = 4444;
#endif // defined(SB_OVERRIDE_DEFAULT_WEBDRIVER_PORT)
int webdriver_port = kDefaultWebDriverPort;
+#if defined(ENABLE_DEBUG_COMMAND_LINE_SWITCHES)
CommandLine* command_line = CommandLine::ForCurrentProcess();
if (command_line->HasSwitch(switches::kWebDriverPort)) {
if (!base::StringToInt(
@@ -114,22 +114,24 @@
webdriver_port = kDefaultWebDriverPort;
}
}
+#endif // ENABLE_DEBUG_COMMAND_LINE_SWITCHES
return webdriver_port;
}
std::string GetWebDriverListenIp() {
- // The default port on which the webdriver server should listen for incoming
+ // The default IP on which the webdriver server should listen for incoming
// connections.
std::string webdriver_listen_ip =
webdriver::WebDriverModule::kDefaultListenIp;
+#if defined(ENABLE_DEBUG_COMMAND_LINE_SWITCHES)
CommandLine* command_line = CommandLine::ForCurrentProcess();
if (command_line->HasSwitch(switches::kWebDriverListenIp)) {
webdriver_listen_ip =
command_line->GetSwitchValueASCII(switches::kWebDriverListenIp);
}
+#endif // ENABLE_DEBUG_COMMAND_LINE_SWITCHES
return webdriver_listen_ip;
}
-#endif // ENABLE_DEBUG_COMMAND_LINE_SWITCHES
#endif // ENABLE_WEBDRIVER
GURL GetInitialURL() {
@@ -625,7 +627,12 @@
#if defined(ENABLE_WEBDRIVER)
#if defined(ENABLE_DEBUG_COMMAND_LINE_SWITCHES)
- if (command_line->HasSwitch(switches::kEnableWebDriver)) {
+ bool create_webdriver_module =
+ !command_line->HasSwitch(switches::kDisableWebDriver);
+#else
+ bool create_webdriver_module = true;
+#endif // ENABLE_DEBUG_COMMAND_LINE_SWITCHES
+ if (create_webdriver_module) {
web_driver_module_.reset(new webdriver::WebDriverModule(
GetWebDriverPort(), GetWebDriverListenIp(),
base::Bind(&BrowserModule::CreateSessionDriver,
@@ -636,7 +643,6 @@
base::Unretained(browser_module_.get())),
base::Bind(&Application::Quit, base::Unretained(this))));
}
-#endif // ENABLE_DEBUG_COMMAND_LINE_SWITCHES
#endif // ENABLE_WEBDRIVER
#if defined(ENABLE_REMOTE_DEBUGGING)
@@ -724,6 +730,9 @@
case kSbEventTypeUnpause:
case kSbEventTypeSuspend:
case kSbEventTypeResume:
+#if SB_API_VERSION >= SB_LOW_MEMORY_EVENT_API_VERSION
+ case kSbEventTypeLowMemory:
+#endif // SB_API_VERSION >= SB_LOW_MEMORY_EVENT_API_VERSION
OnApplicationEvent(starboard_event->type);
break;
case kSbEventTypeNetworkConnect:
@@ -739,11 +748,9 @@
DispatchEventInternal(new base::DeepLinkEvent(link));
break;
}
-#if SB_API_VERSION >= 4
case kSbEventTypeAccessiblitySettingsChanged:
DispatchEventInternal(new base::AccessibilitySettingsChangedEvent());
break;
-#endif // SB_API_VERSION >= 4
default:
DLOG(WARNING) << "Unhandled Starboard event of type: "
<< starboard_event->type;
@@ -815,6 +822,13 @@
browser_module_->Resume();
DLOG(INFO) << "Finished resuming.";
break;
+#if SB_API_VERSION >= SB_LOW_MEMORY_EVENT_API_VERSION
+ case kSbEventTypeLowMemory:
+ DLOG(INFO) << "Got low memory event.";
+ browser_module_->ReduceMemory();
+ DLOG(INFO) << "Finished reducing memory usage.";
+ break;
+#endif // SB_API_VERSION >= SB_LOW_MEMORY_EVENT_API_VERSION
default:
NOTREACHED() << "Unexpected event type: " << event_type;
return;
diff --git a/src/cobalt/browser/browser_bindings_gen.gyp b/src/cobalt/browser/browser_bindings_gen.gyp
index cf2ba72..41f4937 100644
--- a/src/cobalt/browser/browser_bindings_gen.gyp
+++ b/src/cobalt/browser/browser_bindings_gen.gyp
@@ -65,6 +65,7 @@
'../dom/comment.idl',
'../dom/console.idl',
'../dom/crypto.idl',
+ '../dom/custom_event.idl',
'../dom/data_view.idl',
'../dom/device_orientation_event.idl',
'../dom/document.idl',
@@ -79,6 +80,7 @@
'../dom/dom_string_map.idl',
'../dom/dom_token_list.idl',
'../dom/element.idl',
+ '../dom/error_event.idl',
'../dom/event.idl',
'../dom/event_listener.idl',
'../dom/event_target.idl',
@@ -219,9 +221,11 @@
'../audio/audio_node_channel_count_mode.idl',
'../audio/audio_node_channel_interpretation.idl',
'../dom/blob_property_bag.idl',
+ '../dom/custom_event_init.idl',
'../dom/device_orientation_event_init.idl',
'../dom/document_ready_state.idl',
'../dom/dom_parser_supported_type.idl',
+ '../dom/error_event_init.idl',
'../dom/event_init.idl',
'../dom/event_modifier_init.idl',
'../dom/focus_event_init.idl',
diff --git a/src/cobalt/browser/browser_module.cc b/src/cobalt/browser/browser_module.cc
index a9eb520..cfc4a45 100644
--- a/src/cobalt/browser/browser_module.cc
+++ b/src/cobalt/browser/browser_module.cc
@@ -111,8 +111,13 @@
// TODO: Subscribe to viewport size changes.
+const int kMainWebModuleZIndex = 1;
+const int kSplashScreenZIndex = 2;
+
#if defined(ENABLE_DEBUG_CONSOLE)
+const int kDebugConsoleZIndex = 3;
+
const char kFuzzerToggleCommand[] = "fuzzer_toggle";
const char kFuzzerToggleCommandShortHelp[] = "Toggles the input fuzzer on/off.";
const char kFuzzerToggleCommandLongHelp[] =
@@ -211,9 +216,7 @@
storage_manager_(make_scoped_ptr(new StorageUpgradeHandler(url))
.PassAs<storage::StorageManager::UpgradeHandler>(),
options_.storage_manager_options),
-#if defined(OS_STARBOARD)
is_rendered_(false),
-#endif // OS_STARBOARD
#if defined(ENABLE_GPU_ARRAY_BUFFER_ALLOCATOR)
array_buffer_allocator_(
new ResourceProviderArrayBufferAllocator(GetResourceProvider())),
@@ -251,7 +254,8 @@
#endif
will_quit_(false),
application_state_(initial_application_state),
- splash_screen_cache_(new SplashScreenCache()) {
+ splash_screen_cache_(new SplashScreenCache()),
+ produced_render_tree_(false) {
#if SB_HAS(CORE_DUMP_HANDLER_SUPPORT)
SbCoreDumpRegisterHandler(BrowserModule::CoreDumpHandler, this);
on_error_triggered_count_ = 0;
@@ -296,9 +300,7 @@
OnFuzzerToggle(std::string());
}
if (command_line->HasSwitch(switches::kSuspendFuzzer)) {
-#if SB_API_VERSION >= 4
suspend_fuzzer_.emplace();
-#endif
}
#endif // ENABLE_DEBUG_CONSOLE && ENABLE_DEBUG_COMMAND_LINE_SWITCHES
@@ -403,13 +405,18 @@
base::optional<std::string> key = SplashScreenCache::GetKeyForStartUrl(url);
if (fallback_splash_screen_url_ ||
(key && splash_screen_cache_->IsSplashScreenCached(*key))) {
+ // Create the splash screen layer.
+ splash_screen_layer_ =
+ render_tree_combiner_->CreateLayer(kSplashScreenZIndex);
+
splash_screen_.reset(new SplashScreen(
application_state_,
- base::Bind(&BrowserModule::QueueOnRenderTreeProduced,
+ base::Bind(&BrowserModule::QueueOnSplashScreenRenderTreeProduced,
base::Unretained(this)),
&network_module_, viewport_size, GetResourceProvider(),
kLayoutMaxRefreshFrequencyInHz, *fallback_splash_screen_url_, url,
- splash_screen_cache_.get()));
+ splash_screen_cache_.get(),
+ base::Bind(&BrowserModule::DestroySplashScreen, weak_this_)));
lifecycle_observers_.AddObserver(splash_screen_.get());
}
@@ -473,8 +480,6 @@
return;
}
- DestroySplashScreen();
-
// This log is relied on by the webdriver benchmark tests, so it shouldn't be
// changed unless the corresponding benchmark logic is changed as well.
LOG(INFO) << "Loaded WebModule";
@@ -519,26 +524,67 @@
base::Bind(&BrowserModule::ProcessRenderTreeSubmissionQueue, weak_this_));
}
+void BrowserModule::QueueOnSplashScreenRenderTreeProduced(
+ const browser::WebModule::LayoutResults& layout_results) {
+ TRACE_EVENT0("cobalt::browser",
+ "BrowserModule::QueueOnSplashScreenRenderTreeProduced()");
+ render_tree_submission_queue_.AddMessage(
+ base::Bind(&BrowserModule::OnSplashScreenRenderTreeProduced,
+ base::Unretained(this), layout_results));
+ self_message_loop_->PostTask(
+ FROM_HERE,
+ base::Bind(&BrowserModule::ProcessRenderTreeSubmissionQueue, weak_this_));
+}
+
void BrowserModule::OnRenderTreeProduced(
const browser::WebModule::LayoutResults& layout_results) {
TRACE_EVENT0("cobalt::browser", "BrowserModule::OnRenderTreeProduced()");
DCHECK_EQ(MessageLoop::current(), self_message_loop_);
+
+ if (splash_screen_ && !produced_render_tree_) {
+ splash_screen_->Shutdown();
+ }
+ produced_render_tree_ = true;
+
if (application_state_ == base::kApplicationStatePreloading ||
- !render_tree_combiner_) {
+ !render_tree_combiner_ || !main_web_module_layer_) {
+ return;
+ }
+ renderer::Submission renderer_submission(layout_results.render_tree,
+ layout_results.layout_time);
+ renderer_submission.on_rasterized_callback = base::Bind(
+ &BrowserModule::OnRendererSubmissionRasterized, base::Unretained(this));
+ main_web_module_layer_->Submit(renderer_submission, true /* receive_time */);
+
+#if defined(ENABLE_SCREENSHOT)
+ screen_shot_writer_->SetLastPipelineSubmission(renderer::Submission(
+ layout_results.render_tree, layout_results.layout_time));
+#endif
+}
+
+void BrowserModule::OnSplashScreenRenderTreeProduced(
+ const browser::WebModule::LayoutResults& layout_results) {
+ TRACE_EVENT0("cobalt::browser",
+ "BrowserModule::OnSplashScreenRenderTreeProduced()");
+ DCHECK_EQ(MessageLoop::current(), self_message_loop_);
+
+ if (application_state_ == base::kApplicationStatePreloading ||
+ !render_tree_combiner_ || !splash_screen_layer_) {
return;
}
renderer::Submission renderer_submission(layout_results.render_tree,
layout_results.layout_time);
-#if defined(OS_STARBOARD)
renderer_submission.on_rasterized_callback = base::Bind(
&BrowserModule::OnRendererSubmissionRasterized, base::Unretained(this));
-#endif // OS_STARBOARD
- render_tree_combiner_->UpdateMainRenderTree(renderer_submission);
+ splash_screen_layer_->Submit(renderer_submission, false /* receive_time */);
#if defined(ENABLE_SCREENSHOT)
- screen_shot_writer_->SetLastPipelineSubmission(renderer::Submission(
- layout_results.render_tree, layout_results.layout_time));
+// TODO: write the screenshot using render_tree_combiner_ (to combine the
+// splash screen and the main web module). Consider the case where the
+// splash screen is overlaid on top of the main web module render tree and
+// a screenshot is taken: there will be a race condition on which web
+// module updated its render tree last.
#endif
}
@@ -549,11 +595,7 @@
}
#endif
-#if defined(OS_STARBOARD)
SbSystemRequestStop(0);
-#else
- LOG(WARNING) << "window.close() is not supported on this platform.";
-#endif
}
void BrowserModule::OnWindowMinimize() {
@@ -563,11 +605,7 @@
}
#endif
-#if defined(OS_STARBOARD) && SB_API_VERSION >= 4
SbSystemRequestSuspend();
-#else
- LOG(WARNING) << "window.minimize() is not supported on this platform.";
-#endif
}
#if defined(ENABLE_DEBUG_CONSOLE)
@@ -632,16 +670,16 @@
"BrowserModule::OnDebugConsoleRenderTreeProduced()");
DCHECK_EQ(MessageLoop::current(), self_message_loop_);
if (application_state_ == base::kApplicationStatePreloading ||
- !render_tree_combiner_) {
+ !render_tree_combiner_ || !debug_console_layer_) {
return;
}
if (debug_console_->GetMode() == debug::DebugHub::kDebugConsoleOff) {
- render_tree_combiner_->UpdateDebugConsoleRenderTree(base::nullopt);
+ debug_console_layer_->Submit(base::nullopt);
return;
}
- render_tree_combiner_->UpdateDebugConsoleRenderTree(renderer::Submission(
+ debug_console_layer_->Submit(renderer::Submission(
layout_results.render_tree, layout_results.layout_time));
}
@@ -804,9 +842,15 @@
void BrowserModule::DestroySplashScreen() {
TRACE_EVENT0("cobalt::browser", "BrowserModule::DestroySplashScreen()");
+ if (MessageLoop::current() != self_message_loop_) {
+ self_message_loop_->PostTask(
+ FROM_HERE, base::Bind(&BrowserModule::DestroySplashScreen, weak_this_));
+ return;
+ }
if (splash_screen_) {
lifecycle_observers_.RemoveObserver(splash_screen_.get());
}
+ splash_screen_layer_.reset(NULL);
splash_screen_.reset(NULL);
}
@@ -912,6 +956,22 @@
application_state_ = base::kApplicationStatePaused;
}
+void BrowserModule::ReduceMemory() {
+ if (splash_screen_) {
+ splash_screen_->ReduceMemory();
+ }
+
+#if defined(ENABLE_DEBUG_CONSOLE)
+ if (debug_console_) {
+ debug_console_->ReduceMemory();
+ }
+#endif // defined(ENABLE_DEBUG_CONSOLE)
+
+ if (web_module_) {
+ web_module_->ReduceMemory();
+ }
+}
+
void BrowserModule::CheckMemory(
const int64_t& used_cpu_memory,
const base::optional<int64_t>& used_gpu_memory) {
@@ -923,7 +983,6 @@
used_gpu_memory);
}
-#if defined(OS_STARBOARD)
void BrowserModule::OnRendererSubmissionRasterized() {
TRACE_EVENT0("cobalt::browser",
"BrowserModule::OnRendererSubmissionRasterized()");
@@ -933,7 +992,6 @@
SbSystemHideSplashScreen();
}
}
-#endif // OS_STARBOARD
#if defined(COBALT_CHECK_RENDER_TIMEOUT)
void BrowserModule::OnPollForRenderTimeout(const GURL& url) {
@@ -1021,11 +1079,14 @@
render_tree_combiner_.reset(
new RenderTreeCombiner(renderer_module_.get(), GetViewportSize()));
-
- // Always render the debug console. It will draw nothing if disabled.
- // This setting is ignored if ENABLE_DEBUG_CONSOLE is not defined.
- // TODO: Render tree combiner should probably be refactored.
- render_tree_combiner_->set_render_debug_console(true);
+ // Create the main web module layer.
+ main_web_module_layer_ =
+ render_tree_combiner_->CreateLayer(kMainWebModuleZIndex);
+// Create the debug console layer.
+#if defined(ENABLE_DEBUG_CONSOLE)
+ debug_console_layer_ =
+ render_tree_combiner_->CreateLayer(kDebugConsoleZIndex);
+#endif
#if defined(ENABLE_SCREENSHOT)
screen_shot_writer_.reset(new ScreenShotWriter(renderer_module_->pipeline()));
@@ -1085,8 +1146,14 @@
// Clear out the render tree combiner so that it doesn't hold on to any
// render tree resources either.
- if (render_tree_combiner_) {
- render_tree_combiner_->Reset();
+ if (main_web_module_layer_) {
+ main_web_module_layer_->Reset();
+ }
+ if (splash_screen_layer_) {
+ splash_screen_layer_->Reset();
+ }
+ if (debug_console_layer_) {
+ debug_console_layer_->Reset();
}
#if defined(ENABLE_GPU_ARRAY_BUFFER_ALLOCATOR)
diff --git a/src/cobalt/browser/browser_module.h b/src/cobalt/browser/browser_module.h
index ba3861d..386eb0e 100644
--- a/src/cobalt/browser/browser_module.h
+++ b/src/cobalt/browser/browser_module.h
@@ -142,6 +142,10 @@
void Suspend();
void Resume();
+ // Attempt to reduce overall memory consumption. Called in response to a
+ // system indication that memory usage is nearing a critical level.
+ void ReduceMemory();
+
void CheckMemory(const int64_t& used_cpu_memory,
const base::optional<int64_t>& used_gpu_memory);
@@ -173,6 +177,13 @@
void OnRenderTreeProduced(
const browser::WebModule::LayoutResults& layout_results);
+ // Glue functions to deal with the production of the splash screen render
+ // tree, managing the handoff to the renderer.
+ void QueueOnSplashScreenRenderTreeProduced(
+ const browser::WebModule::LayoutResults& layout_results);
+ void OnSplashScreenRenderTreeProduced(
+ const browser::WebModule::LayoutResults& layout_results);
+
// Saves/loads the debug console mode to/from local storage so we can
// persist the user's preference.
void SaveDebugConsoleMode();
@@ -357,8 +368,11 @@
// Sets up the network component for requesting internet resources.
network::NetworkModule network_module_;
- // Manages the two render trees, combines and renders them.
+ // Manages the three render trees, combines and renders them.
scoped_ptr<RenderTreeCombiner> render_tree_combiner_;
+ scoped_ptr<RenderTreeCombiner::Layer> main_web_module_layer_;
+ scoped_ptr<RenderTreeCombiner::Layer> debug_console_layer_;
+ scoped_ptr<RenderTreeCombiner::Layer> splash_screen_layer_;
#if defined(ENABLE_SCREENSHOT)
// Helper object to create screen shots of the last layout tree.
@@ -459,6 +473,9 @@
// The splash screen cache.
scoped_ptr<SplashScreenCache> splash_screen_cache_;
+
+ // Whether or not the main WebModule has produced any render trees yet.
+ bool produced_render_tree_;
};
} // namespace browser
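
The new BrowserModule::ReduceMemory() above simply fans the low-memory signal
(kSbEventTypeLowMemory) out to whichever submodules currently exist: the splash
screen, the debug console when it is compiled in, and the main web module. A
minimal, self-contained sketch of that fan-out pattern follows; the Module and
Owner types are hypothetical stand-ins, not the actual Cobalt classes.

    // Sketch of the ReduceMemory() fan-out pattern shown in the diff above.
    // Module/Owner are hypothetical stand-ins, not Cobalt classes.
    #include <iostream>
    #include <memory>

    struct Module {
      const char* name;
      void ReduceMemory() { std::cout << name << ": releasing caches\n"; }
    };

    class Owner {
     public:
      Owner()
          : splash_screen_(new Module{"splash_screen"}),
    #if defined(ENABLE_DEBUG_CONSOLE)
            debug_console_(new Module{"debug_console"}),
    #endif
            web_module_(new Module{"web_module"}) {}

      // Forward the low-memory signal to every submodule that currently exists.
      void ReduceMemory() {
        if (splash_screen_) splash_screen_->ReduceMemory();
    #if defined(ENABLE_DEBUG_CONSOLE)
        if (debug_console_) debug_console_->ReduceMemory();
    #endif
        if (web_module_) web_module_->ReduceMemory();
      }

     private:
      std::unique_ptr<Module> splash_screen_;
    #if defined(ENABLE_DEBUG_CONSOLE)
      std::unique_ptr<Module> debug_console_;
    #endif
      std::unique_ptr<Module> web_module_;
    };

    int main() {
      Owner owner;
      owner.ReduceMemory();  // e.g. in response to kSbEventTypeLowMemory
      return 0;
    }
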
diff --git a/src/cobalt/browser/debug_console.h b/src/cobalt/browser/debug_console.h
index 9ecbef5..6ed291d 100644
--- a/src/cobalt/browser/debug_console.h
+++ b/src/cobalt/browser/debug_console.h
@@ -79,6 +79,8 @@
web_module_->Resume(resource_provider);
}
+ void ReduceMemory() { web_module_->ReduceMemory(); }
+
private:
void OnError(const GURL& /* url */, const std::string& error) {
LOG(ERROR) << error;
diff --git a/src/cobalt/browser/debug_console/console_values.js b/src/cobalt/browser/debug_console/console_values.js
index 2ed232e..444ca48 100644
--- a/src/cobalt/browser/debug_console/console_values.js
+++ b/src/cobalt/browser/debug_console/console_values.js
@@ -20,8 +20,9 @@
this.DEFAULT_KEY = 'default';
// Reduced space-separated list of CVal prefixes to display at start-up.
this.DEFAULT_ACTIVE_SET =
- 'Cobalt DevTools Memory.CPU Memory.MainWebModule Memory.JS Memory.Font ' +
- 'Event.Duration.MainWebModule.KeyDown Renderer.Rasterize.Duration';
+ 'Cobalt DevTools WebDriver Memory.CPU Memory.MainWebModule Memory.JS ' +
+ 'Memory.Font Event.Duration.MainWebModule.KeyDown ' +
+ 'Renderer.Rasterize.Duration';
var names = window.debugHub.getConsoleValueNames();
this.allCVals = names.split(' ');
diff --git a/src/cobalt/browser/memory_tracker/tool/tool_impl.cc b/src/cobalt/browser/memory_tracker/tool/tool_impl.cc
index f2a92bb..65de06f 100644
--- a/src/cobalt/browser/memory_tracker/tool/tool_impl.cc
+++ b/src/cobalt/browser/memory_tracker/tool/tool_impl.cc
@@ -30,7 +30,6 @@
#include "cobalt/browser/memory_tracker/tool/params.h"
#include "cobalt/browser/memory_tracker/tool/tool_thread.h"
#include "cobalt/browser/memory_tracker/tool/util.h"
-#include "cobalt/script/mozjs/util/stack_trace_helpers.h"
#include "nb/analytics/memory_tracker.h"
#include "nb/analytics/memory_tracker_helpers.h"
#include "nb/concurrent_map.h"
diff --git a/src/cobalt/browser/render_tree_combiner.cc b/src/cobalt/browser/render_tree_combiner.cc
index 171fbc3..78fe4be 100644
--- a/src/cobalt/browser/render_tree_combiner.cc
+++ b/src/cobalt/browser/render_tree_combiner.cc
@@ -14,99 +14,97 @@
#include "cobalt/browser/render_tree_combiner.h"
+#include <map>
+
+#include "base/memory/scoped_ptr.h"
+#include "base/optional.h"
+#include "base/time.h"
#include "cobalt/render_tree/composition_node.h"
#include "cobalt/render_tree/rect_node.h"
+#include "cobalt/renderer/renderer_module.h"
+#include "cobalt/renderer/submission.h"
namespace cobalt {
namespace browser {
-#if defined(ENABLE_DEBUG_CONSOLE)
+RenderTreeCombiner::Layer::Layer(RenderTreeCombiner* render_tree_combiner)
+ : render_tree_combiner_(render_tree_combiner),
+ render_tree_(base::nullopt),
+ receipt_time_(base::nullopt) {}
+
+RenderTreeCombiner::Layer::~Layer() {
+ DCHECK(render_tree_combiner_);
+ render_tree_combiner_->RemoveLayer(this);
+}
+
+void RenderTreeCombiner::Layer::Submit(
+ const base::optional<renderer::Submission>& render_tree_submission,
+ bool receive_time) {
+ render_tree_ = render_tree_submission;
+ if (receive_time) {
+ receipt_time_ = base::TimeTicks::HighResNow();
+ } else {
+ receipt_time_ = base::nullopt;
+ }
+ DCHECK(render_tree_combiner_);
+ render_tree_combiner_->SubmitToRenderer();
+}
+
RenderTreeCombiner::RenderTreeCombiner(
renderer::RendererModule* renderer_module, const math::Size& viewport_size)
- : render_debug_console_(true),
- renderer_module_(renderer_module),
- viewport_size_(viewport_size) {}
+ : renderer_module_(renderer_module), viewport_size_(viewport_size) {}
-RenderTreeCombiner::~RenderTreeCombiner() {}
+scoped_ptr<RenderTreeCombiner::Layer> RenderTreeCombiner::CreateLayer(
+ int z_index) {
+ if (layers_.count(z_index) > 0) {
+ return scoped_ptr<RenderTreeCombiner::Layer>(NULL);
+ }
+ RenderTreeCombiner::Layer* layer = new Layer(this);
+ layers_[z_index] = layer;
-void RenderTreeCombiner::Reset() {
- main_render_tree_ = base::nullopt;
- debug_console_render_tree_ = base::nullopt;
- main_render_tree_receipt_time_ = base::nullopt;
+ return scoped_ptr<RenderTreeCombiner::Layer>(layers_[z_index]);
}
-void RenderTreeCombiner::UpdateMainRenderTree(
- const renderer::Submission& render_tree_submission) {
- main_render_tree_ = render_tree_submission;
- main_render_tree_receipt_time_ = base::TimeTicks::HighResNow();
- SubmitToRenderer();
-}
-
-void RenderTreeCombiner::UpdateDebugConsoleRenderTree(
- const base::optional<renderer::Submission>& render_tree_submission) {
- debug_console_render_tree_ = render_tree_submission;
- SubmitToRenderer();
+void RenderTreeCombiner::RemoveLayer(const Layer* layer) {
+ for (auto it = layers_.begin(); it != layers_.end(); /* no increment */) {
+ if (it->second == layer) {
+ it = layers_.erase(it);
+ } else {
+ ++it;
+ }
+ }
}
void RenderTreeCombiner::SubmitToRenderer() {
- if (render_debug_console_ && debug_console_render_tree_) {
- if (main_render_tree_) {
- render_tree::CompositionNode::Builder builder;
- builder.AddChild(main_render_tree_->render_tree);
- builder.AddChild(debug_console_render_tree_->render_tree);
- scoped_refptr<render_tree::Node> combined_tree =
- new render_tree::CompositionNode(builder);
+ render_tree::CompositionNode::Builder builder;
- // Setup time to be based off of the main submitted tree only.
- // TODO: Setup a "layers" interface on the Pipeline so that
- // trees can be combined and animated there, properly.
- renderer::Submission combined_submission(*main_render_tree_);
- combined_submission.render_tree = combined_tree;
- combined_submission.time_offset =
- main_render_tree_->time_offset +
- (base::TimeTicks::HighResNow() - *main_render_tree_receipt_time_);
-
- renderer_module_->pipeline()->Submit(combined_submission);
- } else {
- // If we are rendering the debug console by itself, give it a solid black
- // background to it.
- render_tree::CompositionNode::Builder builder;
- builder.AddChild(new render_tree::RectNode(
- math::RectF(viewport_size_),
- scoped_ptr<render_tree::Brush>(new render_tree::SolidColorBrush(
- render_tree::ColorRGBA(0.0f, 0.0f, 0.0f, 1.0f)))));
- builder.AddChild(debug_console_render_tree_->render_tree);
-
- renderer::Submission combined_submission(*debug_console_render_tree_);
- combined_submission.render_tree =
- new render_tree::CompositionNode(builder);
- renderer_module_->pipeline()->Submit(combined_submission);
+ // Add children for all layers in order.
+ base::optional<renderer::Submission> first_tree = base::nullopt;
+ base::optional<renderer::Submission> combined_submission = base::nullopt;
+ for (auto it = layers_.begin(); it != layers_.end(); ++it) {
+ RenderTreeCombiner::Layer* layer = it->second;
+ if (layer->render_tree_) {
+ builder.AddChild(layer->render_tree_->render_tree);
+ first_tree = layer->render_tree_;
+ // Make the combined submission with the first receipt_time_ we find.
+ if (!combined_submission && layer->receipt_time_) {
+ combined_submission = renderer::Submission(*layer->render_tree_);
+ combined_submission->time_offset =
+ layer->render_tree_->time_offset +
+ (base::TimeTicks::HighResNow() - *layer->receipt_time_);
+ }
}
- } else if (main_render_tree_) {
- renderer_module_->pipeline()->Submit(*main_render_tree_);
}
+ if (!first_tree) {
+ return;
+ }
+ if (!combined_submission) {
+ // None of the layers store the time.
+ combined_submission = renderer::Submission(*first_tree);
+ }
+
+ combined_submission->render_tree = new render_tree::CompositionNode(builder);
+ renderer_module_->pipeline()->Submit(*combined_submission);
}
-#else // ENABLE_DEBUG_CONSOLE
-RenderTreeCombiner::RenderTreeCombiner(
- renderer::RendererModule* renderer_module, const math::Size& viewport_size)
- : renderer_module_(renderer_module) {
- UNREFERENCED_PARAMETER(viewport_size);
-}
-
-RenderTreeCombiner::~RenderTreeCombiner() {}
-
-void RenderTreeCombiner::Reset() {}
-
-void RenderTreeCombiner::UpdateMainRenderTree(
- const renderer::Submission& render_tree_submission) {
- renderer_module_->pipeline()->Submit(render_tree_submission);
-}
-
-void RenderTreeCombiner::UpdateDebugConsoleRenderTree(
- const base::optional<renderer::Submission>& render_tree_submission) {
- UNREFERENCED_PARAMETER(render_tree_submission);
-}
-#endif // ENABLE_DEBUG_CONSOLE
-
} // namespace browser
} // namespace cobalt
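
The rewritten SubmitToRenderer() above walks |layers_|, a std::map keyed on
z-index, so layers are composited in ascending z-order, and RemoveLayer()
erases entries while iterating by advancing to the iterator returned by
erase(). A small self-contained sketch of those two std::map patterns follows;
the Entry type is a hypothetical stand-in for a layer, not Cobalt code.

    // Sketch of the two std::map patterns used by the new RenderTreeCombiner:
    // ascending-key iteration (z-order) and erase-while-iterating (RemoveLayer).
    #include <iostream>
    #include <map>
    #include <string>

    struct Entry { std::string name; };  // hypothetical stand-in for a Layer

    int main() {
      Entry main_layer{"main_web_module"};
      Entry splash{"splash_screen"};
      Entry console{"debug_console"};

      std::map<int, Entry*> layers;  // keyed on z-index
      layers[1] = &main_layer;
      layers[3] = &console;
      layers[2] = &splash;

      // std::map iterates in ascending key order, so lower z-indices are
      // visited (and would be composited) first.
      for (const auto& it : layers)
        std::cout << it.first << ": " << it.second->name << "\n";

      // RemoveLayer-style erase while iterating: erase() returns the next
      // valid iterator, so only advance when nothing was removed.
      const Entry* doomed = &splash;
      for (auto it = layers.begin(); it != layers.end(); /* no increment */) {
        if (it->second == doomed) {
          it = layers.erase(it);
        } else {
          ++it;
        }
      }
      std::cout << "layers left: " << layers.size() << "\n";
      return 0;
    }
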
diff --git a/src/cobalt/browser/render_tree_combiner.h b/src/cobalt/browser/render_tree_combiner.h
index 76b1a22..65f3127 100644
--- a/src/cobalt/browser/render_tree_combiner.h
+++ b/src/cobalt/browser/render_tree_combiner.h
@@ -15,49 +15,72 @@
#ifndef COBALT_BROWSER_RENDER_TREE_COMBINER_H_
#define COBALT_BROWSER_RENDER_TREE_COMBINER_H_
+#include <map>
+
+#include "base/memory/scoped_ptr.h"
+#include "base/optional.h"
+#include "base/time.h"
#include "cobalt/renderer/renderer_module.h"
#include "cobalt/renderer/submission.h"
namespace cobalt {
namespace browser {
-// Combines the main and debug console render trees. Caches the individual
-// trees as they are produced. Re-renders when either tree changes.
-// This class is only fully implemented when ENABLE_DEBUG_CONSOLE is defined,
-// otherwise (e.g. in release builds) a stub implementation is used.
+// Combines rendering layers (such as the main web module, splash screen,
+// and debug console). Caches the individual trees as they are produced.
+// Re-renders when any tree changes.
class RenderTreeCombiner {
public:
+ // A Layer represents the render layer corresponding to the main web
+ // module, the splash screen, or the debug console, and is used to
+ // create and submit a combined tree to the RendererModule's
+ // pipeline. Layers are combined in order of the |z_index| specified
+ // at the Layers' creation. The RenderTreeCombiner stores pointers
+ // to Layers. The Layers are owned by the caller of
+ // RenderTreeCombiner::CreateLayer.
+ class Layer {
+ public:
+ ~Layer();
+
+ void Reset() {
+ render_tree_ = base::nullopt;
+ receipt_time_ = base::nullopt;
+ }
+
+ // Submit a render tree to the layer, and specify whether the receipt
+ // time should be stored.
+ void Submit(
+ const base::optional<renderer::Submission>& render_tree_submission,
+ bool receive_time = false);
+
+ private:
+ friend class RenderTreeCombiner;
+
+ explicit Layer(RenderTreeCombiner* render_tree_combiner = NULL);
+
+ RenderTreeCombiner* render_tree_combiner_;
+
+ base::optional<renderer::Submission> render_tree_;
+ base::optional<base::TimeTicks> receipt_time_;
+ };
+
explicit RenderTreeCombiner(renderer::RendererModule* renderer_module,
const math::Size& viewport_size);
- ~RenderTreeCombiner();
+ ~RenderTreeCombiner() {}
- void Reset();
-
- // Update the main web module render tree.
- void UpdateMainRenderTree(const renderer::Submission& render_tree_submission);
-
- // Update the debug console render tree.
- void UpdateDebugConsoleRenderTree(
- const base::optional<renderer::Submission>& render_tree_submission);
-
-#if defined(ENABLE_DEBUG_CONSOLE)
- bool render_debug_console() const { return render_debug_console_; }
- void set_render_debug_console(bool render_debug_console) {
- render_debug_console_ = render_debug_console;
- }
-#else // ENABLE_DEBUG_CONSOLE
- bool render_debug_console() const { return false; }
- void set_render_debug_console(bool render_debug_console) {
- UNREFERENCED_PARAMETER(render_debug_console);
- }
-#endif // ENABLE_DEBUG_CONSOLE
+ // Create a Layer with a given |z_index|. If a Layer already exists
+ // at |z_index|, return NULL, and no Layer is created.
+ scoped_ptr<Layer> CreateLayer(int z_index);
private:
-#if defined(ENABLE_DEBUG_CONSOLE)
- // Combines the two cached render trees (main/debug) and renders the result.
- void SubmitToRenderer();
+ // The layers keyed on their z_index.
+ std::map<int, Layer*> layers_;
- bool render_debug_console_;
+ // Removes a layer from |layers_|. Called by the Layer destructor.
+ void RemoveLayer(const Layer* layer);
+
+ // Combines the cached render trees and renders the result.
+ void SubmitToRenderer();
// Local reference to the render pipeline, so we can submit the combined tree.
// Reference counted pointer not necessary here.
@@ -65,22 +88,6 @@
// The size of the output viewport.
math::Size viewport_size_;
-
- // Local references to the main and debug console render trees/animation maps
- // so we can combine them.
- base::optional<renderer::Submission> main_render_tree_;
-
- // This is the time that we received the last main render tree submission.
- // used so that we know what time to forward the submission to the pipeline
- // with.
- base::optional<base::TimeTicks> main_render_tree_receipt_time_;
-
- // The debug console render tree submission.
- base::optional<renderer::Submission> debug_console_render_tree_;
-#else // ENABLE_DEBUG_CONSOLE
- // Use this local reference even in release builds to submit the main tree.
- renderer::RendererModule* renderer_module_;
-#endif // ENABLE_DEBUG_CONSOLE
};
} // namespace browser
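
CreateLayer() above hands ownership of each Layer to the caller as a scoped_ptr
while the combiner keeps only a raw pointer in |layers_|; the Layer destructor
then calls RemoveLayer() on the combiner, so dropping the scoped_ptr (as
BrowserModule::DestroySplashScreen() does with splash_screen_layer_.reset())
automatically removes the layer from composition. The sketch below shows that
caller-owned, self-deregistering pattern with std::unique_ptr in place of
scoped_ptr; Registry and Handle are hypothetical stand-ins, not the Cobalt
classes.

    // Sketch of the caller-owned, self-deregistering Layer pattern: the
    // registry hands out ownership but keeps a raw pointer, and the handle's
    // destructor removes it from the registry.
    #include <cstddef>
    #include <iostream>
    #include <map>
    #include <memory>

    class Registry;

    class Handle {
     public:
      explicit Handle(Registry* registry) : registry_(registry) {}
      ~Handle();  // deregisters itself; defined below
     private:
      Registry* registry_;
    };

    class Registry {
     public:
      // Returns an empty pointer if |z_index| is already taken, mirroring
      // RenderTreeCombiner::CreateLayer().
      std::unique_ptr<Handle> CreateHandle(int z_index) {
        if (handles_.count(z_index) > 0) return nullptr;
        std::unique_ptr<Handle> handle(new Handle(this));
        handles_[z_index] = handle.get();
        return handle;
      }

      // Called by the Handle destructor.
      void Remove(const Handle* handle) {
        for (auto it = handles_.begin(); it != handles_.end();) {
          if (it->second == handle) it = handles_.erase(it); else ++it;
        }
      }

      std::size_t size() const { return handles_.size(); }

     private:
      std::map<int, Handle*> handles_;  // raw pointers; caller owns the handles
    };

    Handle::~Handle() { registry_->Remove(this); }

    int main() {
      Registry combiner;
      auto splash = combiner.CreateHandle(2);
      std::cout << combiner.size() << "\n";  // 1
      splash.reset();  // destructor deregisters the layer from the combiner
      std::cout << combiner.size() << "\n";  // 0
      return 0;
    }
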
diff --git a/src/cobalt/browser/splash_screen.cc b/src/cobalt/browser/splash_screen.cc
index 8c78ce2..2999b41 100644
--- a/src/cobalt/browser/splash_screen.cc
+++ b/src/cobalt/browser/splash_screen.cc
@@ -17,25 +17,51 @@
#include <string>
#include "base/bind.h"
+#include "base/callback.h"
+#include "base/cancelable_callback.h"
#include "base/threading/platform_thread.h"
+#include "base/time.h"
#include "cobalt/browser/splash_screen_cache.h"
#include "cobalt/loader/cache_fetcher.h"
namespace cobalt {
namespace browser {
+namespace {
-SplashScreen::SplashScreen(base::ApplicationState initial_application_state,
- const WebModule::OnRenderTreeProducedCallback&
- render_tree_produced_callback,
- network::NetworkModule* network_module,
- const math::Size& window_dimensions,
- render_tree::ResourceProvider* resource_provider,
- float layout_refresh_rate,
- const GURL& fallback_splash_screen_url,
- const GURL& initial_main_web_module_url,
- SplashScreenCache* splash_screen_cache)
+const int kSplashShutdownSeconds = 2;
+
+void PostCallbackToMessageLoop(const base::Closure& callback,
+ MessageLoop* message_loop) {
+ DCHECK(message_loop);
+ message_loop->PostTask(FROM_HERE, callback);
+}
+
+// TODO: consolidate the definitions of BindToLoop / BindToCurrentLoop
+// from here and from media into base.
+base::Closure BindToLoop(const base::Closure& callback,
+ MessageLoop* message_loop) {
+ return base::Bind(&PostCallbackToMessageLoop, callback, message_loop);
+}
+
+void OnError(const GURL& /* url */, const std::string& error) {
+ LOG(ERROR) << error;
+}
+
+} // namespace
+
+SplashScreen::SplashScreen(
+ base::ApplicationState initial_application_state,
+ const WebModule::OnRenderTreeProducedCallback&
+ render_tree_produced_callback,
+ network::NetworkModule* network_module, const math::Size& window_dimensions,
+ render_tree::ResourceProvider* resource_provider, float layout_refresh_rate,
+ const GURL& fallback_splash_screen_url,
+ const GURL& initial_main_web_module_url,
+ SplashScreenCache* splash_screen_cache,
+ const base::Callback<void()>& on_splash_screen_shutdown_complete)
: render_tree_produced_callback_(render_tree_produced_callback),
- is_ready_(true, false) {
+ self_message_loop_(MessageLoop::current()),
+ on_splash_screen_shutdown_complete_(on_splash_screen_shutdown_complete) {
WebModule::Options web_module_options;
web_module_options.name = "SplashScreenWebModule";
@@ -57,11 +83,14 @@
web_module_options.splash_screen_cache = splash_screen_cache;
}
+ base::Callback<void()> on_window_close(
+ BindToLoop(on_splash_screen_shutdown_complete, self_message_loop_));
+
+ web_module_options.on_before_unload_fired_but_not_handled = on_window_close;
+
web_module_.reset(new WebModule(
- url_to_pass, initial_application_state,
- base::Bind(&SplashScreen::OnRenderTreeProduced, base::Unretained(this)),
- base::Bind(&SplashScreen::OnError, base::Unretained(this)),
- base::Bind(&SplashScreen::OnWindowClosed, base::Unretained(this)),
+ url_to_pass, initial_application_state, render_tree_produced_callback_,
+ base::Bind(&OnError), on_window_close,
base::Closure(), // window_minimize_callback
&stub_media_module_, network_module, window_dimensions,
1.f /*video_pixel_ratio*/, resource_provider, layout_refresh_rate,
@@ -69,23 +98,23 @@
}
SplashScreen::~SplashScreen() {
+ DCHECK_EQ(MessageLoop::current(), self_message_loop_);
// Destroy the web module first to prevent our callbacks from being called
// (from another thread) while member objects are being destroyed.
web_module_.reset();
+ // Cancel any pending run of the splash screen shutdown callback.
+ on_splash_screen_shutdown_complete_.Cancel();
}
-void SplashScreen::WaitUntilReady() {
- is_ready_.Wait();
-}
-
-void SplashScreen::OnRenderTreeProduced(
- const browser::WebModule::LayoutResults& layout_results) {
- is_ready_.Signal();
- render_tree_produced_callback_.Run(layout_results);
-}
-
-void SplashScreen::OnWindowClosed() {
- is_ready_.Signal();
+void SplashScreen::Shutdown() {
+ DCHECK_EQ(MessageLoop::current(), self_message_loop_);
+ DCHECK(web_module_);
+ if (!on_splash_screen_shutdown_complete_.callback().is_null()) {
+ MessageLoop::current()->PostDelayedTask(
+ FROM_HERE, on_splash_screen_shutdown_complete_.callback(),
+ base::TimeDelta::FromSeconds(kSplashShutdownSeconds));
+ }
+ web_module_->InjectBeforeUnloadEvent();
}
} // namespace browser
diff --git a/src/cobalt/browser/splash_screen.h b/src/cobalt/browser/splash_screen.h
index 0e07915..73db062 100644
--- a/src/cobalt/browser/splash_screen.h
+++ b/src/cobalt/browser/splash_screen.h
@@ -33,16 +33,17 @@
//
class SplashScreen : public LifecycleObserver {
public:
- SplashScreen(base::ApplicationState initial_application_state,
- const WebModule::OnRenderTreeProducedCallback&
- render_tree_produced_callback,
- network::NetworkModule* network_module,
- const math::Size& window_dimensions,
- render_tree::ResourceProvider* resource_provider,
- float layout_refresh_rate,
- const GURL& fallback_splash_screen_url,
- const GURL& initial_main_web_module_url,
- cobalt::browser::SplashScreenCache* splash_screen_cache);
+ SplashScreen(
+ base::ApplicationState initial_application_state,
+ const WebModule::OnRenderTreeProducedCallback&
+ render_tree_produced_callback,
+ network::NetworkModule* network_module,
+ const math::Size& window_dimensions,
+ render_tree::ResourceProvider* resource_provider,
+ float layout_refresh_rate, const GURL& fallback_splash_screen_url,
+ const GURL& initial_main_web_module_url,
+ cobalt::browser::SplashScreenCache* splash_screen_cache,
+ const base::Callback<void()>& on_splash_screen_shutdown_complete);
~SplashScreen();
void SetSize(const math::Size& window_dimensions, float video_pixel_ratio) {
@@ -61,29 +62,32 @@
web_module_->Resume(resource_provider);
}
- // Block the caller until the splash screen is ready to be rendered.
- void WaitUntilReady();
+ void ReduceMemory() { web_module_->ReduceMemory(); }
+
+ // This dispatches a beforeunload event in the WebModule. If
+ // beforeunload has any handlers or listeners, Shutdown waits for
+ // window.close to be called or a maximum of kSplashShutdownSeconds
+ // before running |on_splash_screen_shutdown_complete_|. If
+ // beforeunload has no handlers, |on_splash_screen_shutdown_complete_|
+ // is run immediately.
+ void Shutdown();
private:
- void OnRenderTreeProduced(
- const browser::WebModule::LayoutResults& layout_results);
-
- void OnError(const GURL& /* url */, const std::string& error) {
- is_ready_.Signal();
- LOG(ERROR) << error;
- }
-
+ // Run when window.close() is called by the WebModule.
void OnWindowClosed();
+ void OnWindowClosedInternal();
media::MediaModuleStub stub_media_module_;
WebModule::OnRenderTreeProducedCallback render_tree_produced_callback_;
- // Signalled once the splash screen has produced its first render tree or
- // an error occurred.
- base::WaitableEvent is_ready_;
-
scoped_ptr<WebModule> web_module_;
+
+ // The splash screen runs on this message loop.
+ MessageLoop* const self_message_loop_;
+
+ // This is run when window.close() is called by the splash screen, or
+ // after the time limit has been exceeded.
+ base::CancelableClosure on_splash_screen_shutdown_complete_;
};
} // namespace browser
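
SplashScreen::Shutdown() above races two outcomes: the page's beforeunload
handling eventually calls window.close(), or the kSplashShutdownSeconds
fallback posted with PostDelayedTask fires, and the destructor cancels the
pending fallback so the completion callback runs at most once. The sketch below
is a standard-C++ analogue of that "close or timeout, whichever comes first"
race using a condition variable; it does not reproduce Chromium's
CancelableClosure / message-loop machinery, and the names and timings are
illustrative only.

    // Analogue of SplashScreen::Shutdown()'s "window.close() or timeout" race,
    // using std::condition_variable instead of CancelableClosure/PostDelayedTask.
    #include <chrono>
    #include <condition_variable>
    #include <iostream>
    #include <mutex>
    #include <thread>

    int main() {
      std::mutex mutex;
      std::condition_variable cv;
      bool window_closed = false;

      // Simulated splash-screen web module: "calls window.close()" after 1s.
      std::thread splash([&] {
        std::this_thread::sleep_for(std::chrono::seconds(1));
        {
          std::lock_guard<std::mutex> lock(mutex);
          window_closed = true;
        }
        cv.notify_one();
      });

      // Shutdown(): wait for window.close(), but never longer than the
      // kSplashShutdownSeconds fallback (2 seconds in the change above).
      {
        std::unique_lock<std::mutex> lock(mutex);
        bool closed_in_time = cv.wait_for(lock, std::chrono::seconds(2),
                                          [&] { return window_closed; });
        std::cout << (closed_in_time
                          ? "splash closed itself\n"
                          : "timed out; tearing splash down anyway\n");
      }
      splash.join();
      return 0;
    }
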
diff --git a/src/cobalt/browser/suspend_fuzzer.cc b/src/cobalt/browser/suspend_fuzzer.cc
index b94158b..51a7265 100644
--- a/src/cobalt/browser/suspend_fuzzer.cc
+++ b/src/cobalt/browser/suspend_fuzzer.cc
@@ -43,20 +43,13 @@
void SuspendFuzzer::DoStep() {
DCHECK(MessageLoop::current() == thread_.message_loop());
-#if SB_API_VERSION < 4
- NOTREACHED() << "Cannot run suspend_fuzzer on SB_API_VERSION < 4.";
-#endif
if (step_type_ == kShouldRequestSuspend) {
SB_DLOG(INFO) << "suspend_fuzzer: Requesting suspend.";
-#if SB_API_VERSION >= 4
SbSystemRequestSuspend();
-#endif
step_type_ = kShouldRequestUnpause;
} else if (step_type_ == kShouldRequestUnpause) {
SB_DLOG(INFO) << "suspend_fuzzer: Requesting unpause.";
-#if SB_API_VERSION >= 4
SbSystemRequestUnpause();
-#endif
step_type_ = kShouldRequestSuspend;
} else {
NOTREACHED();
diff --git a/src/cobalt/browser/suspend_fuzzer.h b/src/cobalt/browser/suspend_fuzzer.h
index 4aa97cc..d1681cf 100644
--- a/src/cobalt/browser/suspend_fuzzer.h
+++ b/src/cobalt/browser/suspend_fuzzer.h
@@ -22,7 +22,7 @@
namespace browser {
// Repeatedly switch off between calling |SbSystemRequestSuspend| and
-// |SbSystemRequestUnpause|, or just no-op if on an SB_API_VERSION < 4.
+// |SbSystemRequestUnpause|.
class SuspendFuzzer {
public:
SuspendFuzzer();
diff --git a/src/cobalt/browser/switches.cc b/src/cobalt/browser/switches.cc
index 2c39eed..11e98c3 100644
--- a/src/cobalt/browser/switches.cc
+++ b/src/cobalt/browser/switches.cc
@@ -34,12 +34,12 @@
// Switches different debug console modes: on | hud | off
const char kDebugConsoleMode[] = "debug_console";
+// Do not create the WebDriver server.
+const char kDisableWebDriver[] = "disable_webdriver";
+
// Disable webm/vp9.
const char kDisableWebmVp9[] = "disable_webm_vp9";
-// Create WebDriver server.
-const char kEnableWebDriver[] = "enable_webdriver";
-
// Additional base directory for accessing web files via file://.
const char kExtraWebFileDir[] = "web_file_path";
@@ -83,8 +83,7 @@
const char kStubImageDecoder[] = "stub_image_decoder";
// If this flag is set, alternating calls to |SbSystemRequestSuspend| and
-// |SbSystemRequestUnpause| will be made periodically. Requires
-// SB_API_VERSION >= 4, and will otherwise just no-op.
+// |SbSystemRequestUnpause| will be made periodically.
const char kSuspendFuzzer[] = "suspend_fuzzer";
// If this is set, then a trace (see base/debug/trace_eventh.h) is started on
diff --git a/src/cobalt/browser/switches.h b/src/cobalt/browser/switches.h
index 4470c4e..4dc02ef 100644
--- a/src/cobalt/browser/switches.h
+++ b/src/cobalt/browser/switches.h
@@ -24,8 +24,8 @@
extern const char kAudioDecoderStub[];
extern const char kCspMode[];
extern const char kDebugConsoleMode[];
+extern const char kDisableWebDriver[];
extern const char kDisableWebmVp9[];
-extern const char kEnableWebDriver[];
extern const char kExtraWebFileDir[];
extern const char kFakeMicrophone[];
extern const char kIgnoreCertificateErrors[];
diff --git a/src/cobalt/browser/testdata/splash_screen/beforeunload.html b/src/cobalt/browser/testdata/splash_screen/beforeunload.html
new file mode 100644
index 0000000..5b6ecf9
--- /dev/null
+++ b/src/cobalt/browser/testdata/splash_screen/beforeunload.html
@@ -0,0 +1,91 @@
+<!DOCTYPE html>
+<html>
+ <head>
+ <style>
+.box {
+ width: 100px;
+ height: 100px;
+ background-color: red;
+ color : yellow;
+ font-size: 20px;
+ left: 0px;
+ top: 0px;
+ position: absolute;
+ transition: background-color .25s;
+ transition-timing-function: ease;
+}
+.box1{
+ width: 100px;
+ height: 100px;
+ background-color: blue;
+ color: yellow;
+ font-size: 20px;
+ left: 0px;
+ top: 0px;
+ position:absolute;
+ transition: background-color .25s;
+ transition-timing-function: ease;
+}
+ </style>
+ </head>
+<body style="background-color: #1f52a5;">
+<div class="box" id="box">Sample</div>
+<div id="immediately" style="display:block;">
+THIS SHOWS IMMEDIATELY
+</div>
+
+<div id="beforeUnload" style="display:none;">
+THIS SHOWS AT BEFOREUNLOAD
+</div>
+
+<div id="transitionEnd" style="display:none;">
+THIS SHOWS AT TRANSITIONEND
+</div>
+
+
+<script>
+ console.log('Running the script in beforeunload.html');
+
+ function updateTransition() {
+ var el = document.getElementById("box");
+ if (el.className == "box") {
+ el.className = "box1";
+ } else {
+ el.className = "box";
+ }
+ }
+
+ function transitionEndFunction() {
+ console.log("transitionend event");
+ // Set this to true to simulate an unresponsive splash screen which does
+ // not call window.close().
+ var unresponsive = true;
+ // This style change is only shown if unresponsive.
+ document.getElementById("transitionEnd").style.display="block";
+ // Comment this out to test out an unresponsive
+ // window.ontransitionend function.
+ if (!unresponsive) {
+ window.close();
+ }
+ }
+ // Either event handler or event listeners should work.
+ window.ontransitionend = transitionEndFunction;
+ //window.addEventListener("transitionend", transitionEndFunction, true);
+
+ function beforeUnloadFunction() {
+ console.log("beforeunload event");
+ document.getElementById("immediately").style.display="none";
+ document.getElementById("beforeUnload").style.display="block";
+ window.updateTransition();
+ // Returning a string shows a confirmation dialog in Chrome.
+ return "returning text is futile";
+ };
+ // Either event handler or event listeners should work.
+ // window.onbeforeunload = beforeUnloadFunction;
+ window.addEventListener("beforeunload", beforeUnloadFunction, true);
+
+ console.log("Ran the script in beforeunload.html");
+</script>
+
+</body>
+</html>
diff --git a/src/cobalt/browser/testdata/splash_screen/block_render_tree_head_body_display_none.html b/src/cobalt/browser/testdata/splash_screen/block_render_tree_head_body_display_none.html
new file mode 100644
index 0000000..f481c4d
--- /dev/null
+++ b/src/cobalt/browser/testdata/splash_screen/block_render_tree_head_body_display_none.html
@@ -0,0 +1,441 @@
+<!DOCTYPE html>
+<html>
+
+<head style="display : none">
+ <meta http-equiv="Content-Security-Policy" content="
+ default-src 'unsafe-inline';
+ style-src 'unsafe-inline';
+ script-src 'unsafe-inline';">
+</head>
+
+<script>
+ window.setTimeout(function() {
+ document.getElementsByTagName('body')[0].style.display = 'block';
+ }, 5000);
+</script>
+
+<body style="background-color: #1f52a5; display: none">
+<h1>Heading</h1>
+
+<p>Lorem ipsum dolor sit amet, consectetur adipiscing elit. Vivamus
+interdum maximus finibus. Ut fermentum malesuada commodo. Sed
+faucibus, sapien a mattis lobortis, magna ante efficitur mauris, quis
+sodales nibh diam nec quam. Vestibulum magna libero, tincidunt non
+erat sed, molestie pulvinar ex. Maecenas semper blandit elit, id
+suscipit nulla venenatis pretium. Integer accumsan porta felis, vitae
+placerat urna accumsan vel. Aliquam eu aliquet mi. Aenean tincidunt
+eros lectus, sit amet efficitur orci ultrices at. Morbi lobortis ex
+quis luctus rutrum. In nulla velit, elementum vitae turpis vitae,
+finibus varius massa. Morbi id libero faucibus, tempus eros et,
+ullamcorper ipsum. Sed eleifend finibus bibendum. Nullam ut nunc nec
+lacus posuere dignissim. Nunc sollicitudin vitae augue id
+vulputate. Ut ac nibh gravida, volutpat est ac, facilisis neque.</p>
+
+<p>Nam dictum leo massa, non posuere dui bibendum id. Morbi sagittis est
+non est laoreet, a sollicitudin felis aliquet. Ut cursus vel leo a
+efficitur. Proin ut pellentesque sapien, vel maximus dui. Suspendisse
+eu felis eget leo elementum efficitur. Class aptent taciti sociosqu ad
+litora torquent per conubia nostra, per inceptos himenaeos. Fusce
+lobortis velit in elit pellentesque, ut auctor ipsum dignissim. Sed
+aliquet eleifend convallis. Duis mollis, dolor sed rutrum mollis,
+augue eros dignissim erat, eu dapibus augue turpis ac sapien. Morbi at
+volutpat odio, at molestie risus. Nulla quis nulla et magna vestibulum
+euismod. Praesent suscipit quam elit, non luctus turpis rutrum
+faucibus.</p>
+
+<p>Morbi feugiat lacus rhoncus, dignissim velit nec, dignissim
+lorem. Aliquam erat volutpat. Mauris semper dictum tempus. Nulla ex
+ligula, malesuada in ornare sed, euismod vitae massa. Etiam quis erat
+quis nisl facilisis suscipit. Mauris placerat ante et auctor
+fermentum. Donec tincidunt justo sem, ullamcorper vulputate nisl
+commodo a. Vestibulum quis ex non elit porttitor semper eget quis
+tortor. Suspendisse mattis neque non elementum scelerisque. Nulla
+facilisi. Nulla non felis et justo feugiat elementum. Aenean sodales
+turpis at erat eleifend lacinia. Proin eleifend volutpat purus id
+mollis. Proin vel tellus faucibus, sagittis libero at, lobortis
+odio. Praesent quam mauris, auctor vel velit eu, convallis molestie
+nisi. Pellentesque in nunc at orci ultrices vehicula.</p>
+
+<p>Praesent nibh lectus, efficitur sed risus in, rutrum tristique
+arcu. Curabitur non efficitur elit. Phasellus eget odio iaculis,
+molestie dui eget, venenatis erat. Nulla luctus facilisis lectus, nec
+dapibus tortor rhoncus vel. Donec nec arcu elit. Nullam ut faucibus
+purus, sed ultricies diam. Pellentesque at finibus ipsum. Vestibulum
+egestas dignissim nisl, ac rhoncus risus finibus sit amet. Donec non
+feugiat ante. Donec vehicula dui a lorem imperdiet, a tempus diam
+pulvinar. Nullam congue efficitur justo, non posuere ligula sodales
+in. Ut a urna ornare, ultrices velit in, pellentesque
+lorem. Vestibulum ante ipsum primis in faucibus orci luctus et
+ultrices posuere cubilia Curae;</p>
+
+<p>Orci varius natoque penatibus et magnis dis parturient montes,
+nascetur ridiculus mus. Morbi maximus quis magna et aliquet. Nam
+bibendum fermentum tempus. Praesent iaculis tortor metus, at
+vestibulum ipsum hendrerit mattis. Proin fringilla nisl sit amet
+tincidunt blandit. Interdum et malesuada fames ac ante ipsum primis in
+faucibus. Phasellus vel lectus leo. Curabitur fringilla, arcu non
+posuere viverra, urna metus blandit augue, convallis mattis tortor dui
+vel arcu. In sit amet metus vitae ex rhoncus hendrerit.</p>
+
+<p>Lorem ipsum dolor sit amet, consectetur adipiscing elit. Vivamus
+interdum maximus finibus. Ut fermentum malesuada commodo. Sed
+faucibus, sapien a mattis lobortis, magna ante efficitur mauris, quis
+sodales nibh diam nec quam. Vestibulum magna libero, tincidunt non
+erat sed, molestie pulvinar ex. Maecenas semper blandit elit, id
+suscipit nulla venenatis pretium. Integer accumsan porta felis, vitae
+placerat urna accumsan vel. Aliquam eu aliquet mi. Aenean tincidunt
+eros lectus, sit amet efficitur orci ultrices at. Morbi lobortis ex
+quis luctus rutrum. In nulla velit, elementum vitae turpis vitae,
+finibus varius massa. Morbi id libero faucibus, tempus eros et,
+ullamcorper ipsum. Sed eleifend finibus bibendum. Nullam ut nunc nec
+lacus posuere dignissim. Nunc sollicitudin vitae augue id
+vulputate. Ut ac nibh gravida, volutpat est ac, facilisis neque.</p>
+
+<p>Nam dictum leo massa, non posuere dui bibendum id. Morbi sagittis est
+non est laoreet, a sollicitudin felis aliquet. Ut cursus vel leo a
+efficitur. Proin ut pellentesque sapien, vel maximus dui. Suspendisse
+eu felis eget leo elementum efficitur. Class aptent taciti sociosqu ad
+litora torquent per conubia nostra, per inceptos himenaeos. Fusce
+lobortis velit in elit pellentesque, ut auctor ipsum dignissim. Sed
+aliquet eleifend convallis. Duis mollis, dolor sed rutrum mollis,
+augue eros dignissim erat, eu dapibus augue turpis ac sapien. Morbi at
+volutpat odio, at molestie risus. Nulla quis nulla et magna vestibulum
+euismod. Praesent suscipit quam elit, non luctus turpis rutrum
+faucibus.</p>
+
+<p>Morbi feugiat lacus rhoncus, dignissim velit nec, dignissim
+lorem. Aliquam erat volutpat. Mauris semper dictum tempus. Nulla ex
+ligula, malesuada in ornare sed, euismod vitae massa. Etiam quis erat
+quis nisl facilisis suscipit. Mauris placerat ante et auctor
+fermentum. Donec tincidunt justo sem, ullamcorper vulputate nisl
+commodo a. Vestibulum quis ex non elit porttitor semper eget quis
+tortor. Suspendisse mattis neque non elementum scelerisque. Nulla
+facilisi. Nulla non felis et justo feugiat elementum. Aenean sodales
+turpis at erat eleifend lacinia. Proin eleifend volutpat purus id
+mollis. Proin vel tellus faucibus, sagittis libero at, lobortis
+odio. Praesent quam mauris, auctor vel velit eu, convallis molestie
+nisi. Pellentesque in nunc at orci ultrices vehicula.</p>
+
+<p>Praesent nibh lectus, efficitur sed risus in, rutrum tristique
+arcu. Curabitur non efficitur elit. Phasellus eget odio iaculis,
+molestie dui eget, venenatis erat. Nulla luctus facilisis lectus, nec
+dapibus tortor rhoncus vel. Donec nec arcu elit. Nullam ut faucibus
+purus, sed ultricies diam. Pellentesque at finibus ipsum. Vestibulum
+egestas dignissim nisl, ac rhoncus risus finibus sit amet. Donec non
+feugiat ante. Donec vehicula dui a lorem imperdiet, a tempus diam
+pulvinar. Nullam congue efficitur justo, non posuere ligula sodales
+in. Ut a urna ornare, ultrices velit in, pellentesque
+lorem. Vestibulum ante ipsum primis in faucibus orci luctus et
+ultrices posuere cubilia Curae;</p>
+
+<p>Orci varius natoque penatibus et magnis dis parturient montes,
+nascetur ridiculus mus. Morbi maximus quis magna et aliquet. Nam
+bibendum fermentum tempus. Praesent iaculis tortor metus, at
+vestibulum ipsum hendrerit mattis. Proin fringilla nisl sit amet
+tincidunt blandit. Interdum et malesuada fames ac ante ipsum primis in
+faucibus. Phasellus vel lectus leo. Curabitur fringilla, arcu non
+posuere viverra, urna metus blandit augue, convallis mattis tortor dui
+vel arcu. In sit amet metus vitae ex rhoncus hendrerit.</p>
+
+<p>Lorem ipsum dolor sit amet, consectetur adipiscing elit. Vivamus
+interdum maximus finibus. Ut fermentum malesuada commodo. Sed
+faucibus, sapien a mattis lobortis, magna ante efficitur mauris, quis
+sodales nibh diam nec quam. Vestibulum magna libero, tincidunt non
+erat sed, molestie pulvinar ex. Maecenas semper blandit elit, id
+suscipit nulla venenatis pretium. Integer accumsan porta felis, vitae
+placerat urna accumsan vel. Aliquam eu aliquet mi. Aenean tincidunt
+eros lectus, sit amet efficitur orci ultrices at. Morbi lobortis ex
+quis luctus rutrum. In nulla velit, elementum vitae turpis vitae,
+finibus varius massa. Morbi id libero faucibus, tempus eros et,
+ullamcorper ipsum. Sed eleifend finibus bibendum. Nullam ut nunc nec
+lacus posuere dignissim. Nunc sollicitudin vitae augue id
+vulputate. Ut ac nibh gravida, volutpat est ac, facilisis neque.</p>
+
+<p>Nam dictum leo massa, non posuere dui bibendum id. Morbi sagittis est
+non est laoreet, a sollicitudin felis aliquet. Ut cursus vel leo a
+efficitur. Proin ut pellentesque sapien, vel maximus dui. Suspendisse
+eu felis eget leo elementum efficitur. Class aptent taciti sociosqu ad
+litora torquent per conubia nostra, per inceptos himenaeos. Fusce
+lobortis velit in elit pellentesque, ut auctor ipsum dignissim. Sed
+aliquet eleifend convallis. Duis mollis, dolor sed rutrum mollis,
+augue eros dignissim erat, eu dapibus augue turpis ac sapien. Morbi at
+volutpat odio, at molestie risus. Nulla quis nulla et magna vestibulum
+euismod. Praesent suscipit quam elit, non luctus turpis rutrum
+faucibus.</p>
+
+<p>Morbi feugiat lacus rhoncus, dignissim velit nec, dignissim
+lorem. Aliquam erat volutpat. Mauris semper dictum tempus. Nulla ex
+ligula, malesuada in ornare sed, euismod vitae massa. Etiam quis erat
+quis nisl facilisis suscipit. Mauris placerat ante et auctor
+fermentum. Donec tincidunt justo sem, ullamcorper vulputate nisl
+commodo a. Vestibulum quis ex non elit porttitor semper eget quis
+tortor. Suspendisse mattis neque non elementum scelerisque. Nulla
+facilisi. Nulla non felis et justo feugiat elementum. Aenean sodales
+turpis at erat eleifend lacinia. Proin eleifend volutpat purus id
+mollis. Proin vel tellus faucibus, sagittis libero at, lobortis
+odio. Praesent quam mauris, auctor vel velit eu, convallis molestie
+nisi. Pellentesque in nunc at orci ultrices vehicula.</p>
+
+<p>Praesent nibh lectus, efficitur sed risus in, rutrum tristique
+arcu. Curabitur non efficitur elit. Phasellus eget odio iaculis,
+molestie dui eget, venenatis erat. Nulla luctus facilisis lectus, nec
+dapibus tortor rhoncus vel. Donec nec arcu elit. Nullam ut faucibus
+purus, sed ultricies diam. Pellentesque at finibus ipsum. Vestibulum
+egestas dignissim nisl, ac rhoncus risus finibus sit amet. Donec non
+feugiat ante. Donec vehicula dui a lorem imperdiet, a tempus diam
+pulvinar. Nullam congue efficitur justo, non posuere ligula sodales
+in. Ut a urna ornare, ultrices velit in, pellentesque
+lorem. Vestibulum ante ipsum primis in faucibus orci luctus et
+ultrices posuere cubilia Curae;</p>
+
+<p>Orci varius natoque penatibus et magnis dis parturient montes,
+nascetur ridiculus mus. Morbi maximus quis magna et aliquet. Nam
+bibendum fermentum tempus. Praesent iaculis tortor metus, at
+vestibulum ipsum hendrerit mattis. Proin fringilla nisl sit amet
+tincidunt blandit. Interdum et malesuada fames ac ante ipsum primis in
+faucibus. Phasellus vel lectus leo. Curabitur fringilla, arcu non
+posuere viverra, urna metus blandit augue, convallis mattis tortor dui
+vel arcu. In sit amet metus vitae ex rhoncus hendrerit.</p>
+
+<p>Lorem ipsum dolor sit amet, consectetur adipiscing elit. Vivamus
+interdum maximus finibus. Ut fermentum malesuada commodo. Sed
+faucibus, sapien a mattis lobortis, magna ante efficitur mauris, quis
+sodales nibh diam nec quam. Vestibulum magna libero, tincidunt non
+erat sed, molestie pulvinar ex. Maecenas semper blandit elit, id
+suscipit nulla venenatis pretium. Integer accumsan porta felis, vitae
+placerat urna accumsan vel. Aliquam eu aliquet mi. Aenean tincidunt
+eros lectus, sit amet efficitur orci ultrices at. Morbi lobortis ex
+quis luctus rutrum. In nulla velit, elementum vitae turpis vitae,
+finibus varius massa. Morbi id libero faucibus, tempus eros et,
+ullamcorper ipsum. Sed eleifend finibus bibendum. Nullam ut nunc nec
+lacus posuere dignissim. Nunc sollicitudin vitae augue id
+vulputate. Ut ac nibh gravida, volutpat est ac, facilisis neque.</p>
+
+<p>Nam dictum leo massa, non posuere dui bibendum id. Morbi sagittis est
+non est laoreet, a sollicitudin felis aliquet. Ut cursus vel leo a
+efficitur. Proin ut pellentesque sapien, vel maximus dui. Suspendisse
+eu felis eget leo elementum efficitur. Class aptent taciti sociosqu ad
+litora torquent per conubia nostra, per inceptos himenaeos. Fusce
+lobortis velit in elit pellentesque, ut auctor ipsum dignissim. Sed
+aliquet eleifend convallis. Duis mollis, dolor sed rutrum mollis,
+augue eros dignissim erat, eu dapibus augue turpis ac sapien. Morbi at
+volutpat odio, at molestie risus. Nulla quis nulla et magna vestibulum
+euismod. Praesent suscipit quam elit, non luctus turpis rutrum
+faucibus.</p>
+
+<p>Morbi feugiat lacus rhoncus, dignissim velit nec, dignissim
+lorem. Aliquam erat volutpat. Mauris semper dictum tempus. Nulla ex
+ligula, malesuada in ornare sed, euismod vitae massa. Etiam quis erat
+quis nisl facilisis suscipit. Mauris placerat ante et auctor
+fermentum. Donec tincidunt justo sem, ullamcorper vulputate nisl
+commodo a. Vestibulum quis ex non elit porttitor semper eget quis
+tortor. Suspendisse mattis neque non elementum scelerisque. Nulla
+facilisi. Nulla non felis et justo feugiat elementum. Aenean sodales
+turpis at erat eleifend lacinia. Proin eleifend volutpat purus id
+mollis. Proin vel tellus faucibus, sagittis libero at, lobortis
+odio. Praesent quam mauris, auctor vel velit eu, convallis molestie
+nisi. Pellentesque in nunc at orci ultrices vehicula.</p>
+
+<p>Praesent nibh lectus, efficitur sed risus in, rutrum tristique
+arcu. Curabitur non efficitur elit. Phasellus eget odio iaculis,
+molestie dui eget, venenatis erat. Nulla luctus facilisis lectus, nec
+dapibus tortor rhoncus vel. Donec nec arcu elit. Nullam ut faucibus
+purus, sed ultricies diam. Pellentesque at finibus ipsum. Vestibulum
+egestas dignissim nisl, ac rhoncus risus finibus sit amet. Donec non
+feugiat ante. Donec vehicula dui a lorem imperdiet, a tempus diam
+pulvinar. Nullam congue efficitur justo, non posuere ligula sodales
+in. Ut a urna ornare, ultrices velit in, pellentesque
+lorem. Vestibulum ante ipsum primis in faucibus orci luctus et
+ultrices posuere cubilia Curae;</p>
+
+<p>Orci varius natoque penatibus et magnis dis parturient montes,
+nascetur ridiculus mus. Morbi maximus quis magna et aliquet. Nam
+bibendum fermentum tempus. Praesent iaculis tortor metus, at
+vestibulum ipsum hendrerit mattis. Proin fringilla nisl sit amet
+tincidunt blandit. Interdum et malesuada fames ac ante ipsum primis in
+faucibus. Phasellus vel lectus leo. Curabitur fringilla, arcu non
+posuere viverra, urna metus blandit augue, convallis mattis tortor dui
+vel arcu. In sit amet metus vitae ex rhoncus hendrerit.</p>
+
+<p>Lorem ipsum dolor sit amet, consectetur adipiscing elit. Vivamus
+interdum maximus finibus. Ut fermentum malesuada commodo. Sed
+faucibus, sapien a mattis lobortis, magna ante efficitur mauris, quis
+sodales nibh diam nec quam. Vestibulum magna libero, tincidunt non
+erat sed, molestie pulvinar ex. Maecenas semper blandit elit, id
+suscipit nulla venenatis pretium. Integer accumsan porta felis, vitae
+placerat urna accumsan vel. Aliquam eu aliquet mi. Aenean tincidunt
+eros lectus, sit amet efficitur orci ultrices at. Morbi lobortis ex
+quis luctus rutrum. In nulla velit, elementum vitae turpis vitae,
+finibus varius massa. Morbi id libero faucibus, tempus eros et,
+ullamcorper ipsum. Sed eleifend finibus bibendum. Nullam ut nunc nec
+lacus posuere dignissim. Nunc sollicitudin vitae augue id
+vulputate. Ut ac nibh gravida, volutpat est ac, facilisis neque.</p>
+
+<p>Nam dictum leo massa, non posuere dui bibendum id. Morbi sagittis est
+non est laoreet, a sollicitudin felis aliquet. Ut cursus vel leo a
+efficitur. Proin ut pellentesque sapien, vel maximus dui. Suspendisse
+eu felis eget leo elementum efficitur. Class aptent taciti sociosqu ad
+litora torquent per conubia nostra, per inceptos himenaeos. Fusce
+lobortis velit in elit pellentesque, ut auctor ipsum dignissim. Sed
+aliquet eleifend convallis. Duis mollis, dolor sed rutrum mollis,
+augue eros dignissim erat, eu dapibus augue turpis ac sapien. Morbi at
+volutpat odio, at molestie risus. Nulla quis nulla et magna vestibulum
+euismod. Praesent suscipit quam elit, non luctus turpis rutrum
+faucibus.</p>
+
+<p>Morbi feugiat lacus rhoncus, dignissim velit nec, dignissim
+lorem. Aliquam erat volutpat. Mauris semper dictum tempus. Nulla ex
+ligula, malesuada in ornare sed, euismod vitae massa. Etiam quis erat
+quis nisl facilisis suscipit. Mauris placerat ante et auctor
+fermentum. Donec tincidunt justo sem, ullamcorper vulputate nisl
+commodo a. Vestibulum quis ex non elit porttitor semper eget quis
+tortor. Suspendisse mattis neque non elementum scelerisque. Nulla
+facilisi. Nulla non felis et justo feugiat elementum. Aenean sodales
+turpis at erat eleifend lacinia. Proin eleifend volutpat purus id
+mollis. Proin vel tellus faucibus, sagittis libero at, lobortis
+odio. Praesent quam mauris, auctor vel velit eu, convallis molestie
+nisi. Pellentesque in nunc at orci ultrices vehicula.</p>
+
+<p>Praesent nibh lectus, efficitur sed risus in, rutrum tristique
+arcu. Curabitur non efficitur elit. Phasellus eget odio iaculis,
+molestie dui eget, venenatis erat. Nulla luctus facilisis lectus, nec
+dapibus tortor rhoncus vel. Donec nec arcu elit. Nullam ut faucibus
+purus, sed ultricies diam. Pellentesque at finibus ipsum. Vestibulum
+egestas dignissim nisl, ac rhoncus risus finibus sit amet. Donec non
+feugiat ante. Donec vehicula dui a lorem imperdiet, a tempus diam
+pulvinar. Nullam congue efficitur justo, non posuere ligula sodales
+in. Ut a urna ornare, ultrices velit in, pellentesque
+lorem. Vestibulum ante ipsum primis in faucibus orci luctus et
+ultrices posuere cubilia Curae;</p>
+
+<p>Orci varius natoque penatibus et magnis dis parturient montes,
+nascetur ridiculus mus. Morbi maximus quis magna et aliquet. Nam
+bibendum fermentum tempus. Praesent iaculis tortor metus, at
+vestibulum ipsum hendrerit mattis. Proin fringilla nisl sit amet
+tincidunt blandit. Interdum et malesuada fames ac ante ipsum primis in
+faucibus. Phasellus vel lectus leo. Curabitur fringilla, arcu non
+posuere viverra, urna metus blandit augue, convallis mattis tortor dui
+vel arcu. In sit amet metus vitae ex rhoncus hendrerit.</p>
+
+<p>Lorem ipsum dolor sit amet, consectetur adipiscing elit. Vivamus
+interdum maximus finibus. Ut fermentum malesuada commodo. Sed
+faucibus, sapien a mattis lobortis, magna ante efficitur mauris, quis
+sodales nibh diam nec quam. Vestibulum magna libero, tincidunt non
+erat sed, molestie pulvinar ex. Maecenas semper blandit elit, id
+suscipit nulla venenatis pretium. Integer accumsan porta felis, vitae
+placerat urna accumsan vel. Aliquam eu aliquet mi. Aenean tincidunt
+eros lectus, sit amet efficitur orci ultrices at. Morbi lobortis ex
+quis luctus rutrum. In nulla velit, elementum vitae turpis vitae,
+finibus varius massa. Morbi id libero faucibus, tempus eros et,
+ullamcorper ipsum. Sed eleifend finibus bibendum. Nullam ut nunc nec
+lacus posuere dignissim. Nunc sollicitudin vitae augue id
+vulputate. Ut ac nibh gravida, volutpat est ac, facilisis neque.</p>
+
+<p>Nam dictum leo massa, non posuere dui bibendum id. Morbi sagittis est
+non est laoreet, a sollicitudin felis aliquet. Ut cursus vel leo a
+efficitur. Proin ut pellentesque sapien, vel maximus dui. Suspendisse
+eu felis eget leo elementum efficitur. Class aptent taciti sociosqu ad
+litora torquent per conubia nostra, per inceptos himenaeos. Fusce
+lobortis velit in elit pellentesque, ut auctor ipsum dignissim. Sed
+aliquet eleifend convallis. Duis mollis, dolor sed rutrum mollis,
+augue eros dignissim erat, eu dapibus augue turpis ac sapien. Morbi at
+volutpat odio, at molestie risus. Nulla quis nulla et magna vestibulum
+euismod. Praesent suscipit quam elit, non luctus turpis rutrum
+faucibus.</p>
+
+<p>Morbi feugiat lacus rhoncus, dignissim velit nec, dignissim
+lorem. Aliquam erat volutpat. Mauris semper dictum tempus. Nulla ex
+ligula, malesuada in ornare sed, euismod vitae massa. Etiam quis erat
+quis nisl facilisis suscipit. Mauris placerat ante et auctor
+fermentum. Donec tincidunt justo sem, ullamcorper vulputate nisl
+commodo a. Vestibulum quis ex non elit porttitor semper eget quis
+tortor. Suspendisse mattis neque non elementum scelerisque. Nulla
+facilisi. Nulla non felis et justo feugiat elementum. Aenean sodales
+turpis at erat eleifend lacinia. Proin eleifend volutpat purus id
+mollis. Proin vel tellus faucibus, sagittis libero at, lobortis
+odio. Praesent quam mauris, auctor vel velit eu, convallis molestie
+nisi. Pellentesque in nunc at orci ultrices vehicula.</p>
+
+<p>Praesent nibh lectus, efficitur sed risus in, rutrum tristique
+arcu. Curabitur non efficitur elit. Phasellus eget odio iaculis,
+molestie dui eget, venenatis erat. Nulla luctus facilisis lectus, nec
+dapibus tortor rhoncus vel. Donec nec arcu elit. Nullam ut faucibus
+purus, sed ultricies diam. Pellentesque at finibus ipsum. Vestibulum
+egestas dignissim nisl, ac rhoncus risus finibus sit amet. Donec non
+feugiat ante. Donec vehicula dui a lorem imperdiet, a tempus diam
+pulvinar. Nullam congue efficitur justo, non posuere ligula sodales
+in. Ut a urna ornare, ultrices velit in, pellentesque
+lorem. Vestibulum ante ipsum primis in faucibus orci luctus et
+ultrices posuere cubilia Curae;</p>
+
+<p>Orci varius natoque penatibus et magnis dis parturient montes,
+nascetur ridiculus mus. Morbi maximus quis magna et aliquet. Nam
+bibendum fermentum tempus. Praesent iaculis tortor metus, at
+vestibulum ipsum hendrerit mattis. Proin fringilla nisl sit amet
+tincidunt blandit. Interdum et malesuada fames ac ante ipsum primis in
+faucibus. Phasellus vel lectus leo. Curabitur fringilla, arcu non
+posuere viverra, urna metus blandit augue, convallis mattis tortor dui
+vel arcu. In sit amet metus vitae ex rhoncus hendrerit.</p>
+
+<p>Lorem ipsum dolor sit amet, consectetur adipiscing elit. Vivamus
+interdum maximus finibus. Ut fermentum malesuada commodo. Sed
+faucibus, sapien a mattis lobortis, magna ante efficitur mauris, quis
+sodales nibh diam nec quam. Vestibulum magna libero, tincidunt non
+erat sed, molestie pulvinar ex. Maecenas semper blandit elit, id
+suscipit nulla venenatis pretium. Integer accumsan porta felis, vitae
+placerat urna accumsan vel. Aliquam eu aliquet mi. Aenean tincidunt
+eros lectus, sit amet efficitur orci ultrices at. Morbi lobortis ex
+quis luctus rutrum. In nulla velit, elementum vitae turpis vitae,
+finibus varius massa. Morbi id libero faucibus, tempus eros et,
+ullamcorper ipsum. Sed eleifend finibus bibendum. Nullam ut nunc nec
+lacus posuere dignissim. Nunc sollicitudin vitae augue id
+vulputate. Ut ac nibh gravida, volutpat est ac, facilisis neque.</p>
+
+<p>Nam dictum leo massa, non posuere dui bibendum id. Morbi sagittis est
+non est laoreet, a sollicitudin felis aliquet. Ut cursus vel leo a
+efficitur. Proin ut pellentesque sapien, vel maximus dui. Suspendisse
+eu felis eget leo elementum efficitur. Class aptent taciti sociosqu ad
+litora torquent per conubia nostra, per inceptos himenaeos. Fusce
+lobortis velit in elit pellentesque, ut auctor ipsum dignissim. Sed
+aliquet eleifend convallis. Duis mollis, dolor sed rutrum mollis,
+augue eros dignissim erat, eu dapibus augue turpis ac sapien. Morbi at
+volutpat odio, at molestie risus. Nulla quis nulla et magna vestibulum
+euismod. Praesent suscipit quam elit, non luctus turpis rutrum
+faucibus.</p>
+
+<p>Morbi feugiat lacus rhoncus, dignissim velit nec, dignissim
+lorem. Aliquam erat volutpat. Mauris semper dictum tempus. Nulla ex
+ligula, malesuada in ornare sed, euismod vitae massa. Etiam quis erat
+quis nisl facilisis suscipit. Mauris placerat ante et auctor
+fermentum. Donec tincidunt justo sem, ullamcorper vulputate nisl
+commodo a. Vestibulum quis ex non elit porttitor semper eget quis
+tortor. Suspendisse mattis neque non elementum scelerisque. Nulla
+facilisi. Nulla non felis et justo feugiat elementum. Aenean sodales
+turpis at erat eleifend lacinia. Proin eleifend volutpat purus id
+mollis. Proin vel tellus faucibus, sagittis libero at, lobortis
+odio. Praesent quam mauris, auctor vel velit eu, convallis molestie
+nisi. Pellentesque in nunc at orci ultrices vehicula.</p>
+
+<p>Praesent nibh lectus, efficitur sed risus in, rutrum tristique
+arcu. Curabitur non efficitur elit. Phasellus eget odio iaculis,
+molestie dui eget, venenatis erat. Nulla luctus facilisis lectus, nec
+dapibus tortor rhoncus vel. Donec nec arcu elit. Nullam ut faucibus
+purus, sed ultricies diam. Pellentesque at finibus ipsum. Vestibulum
+egestas dignissim nisl, ac rhoncus risus finibus sit amet. Donec non
+feugiat ante. Donec vehicula dui a lorem imperdiet, a tempus diam
+pulvinar. Nullam congue efficitur justo, non posuere ligula sodales
+in. Ut a urna ornare, ultrices velit in, pellentesque
+lorem. Vestibulum ante ipsum primis in faucibus orci luctus et
+ultrices posuere cubilia Curae;</p>
+
+<p>Orci varius natoque penatibus et magnis dis parturient montes,
+nascetur ridiculus mus. Morbi maximus quis magna et aliquet. Nam
+bibendum fermentum tempus. Praesent iaculis tortor metus, at
+vestibulum ipsum hendrerit mattis. Proin fringilla nisl sit amet
+tincidunt blandit. Interdum et malesuada fames ac ante ipsum primis in
+faucibus. Phasellus vel lectus leo. Curabitur fringilla, arcu non
+posuere viverra, urna metus blandit augue, convallis mattis tortor dui
+vel arcu. In sit amet metus vitae ex rhoncus hendrerit.</p>
+
+</body>
+</html>
diff --git a/src/cobalt/browser/testdata/splash_screen/block_render_tree_html_display_none.html b/src/cobalt/browser/testdata/splash_screen/block_render_tree_html_display_none.html
new file mode 100644
index 0000000..d5f4552
--- /dev/null
+++ b/src/cobalt/browser/testdata/splash_screen/block_render_tree_html_display_none.html
@@ -0,0 +1,440 @@
+<!DOCTYPE html>
+<html style="display: none">
+
+<head>
+ <meta http-equiv="Content-Security-Policy" content="
+ default-src 'unsafe-inline';
+ style-src 'unsafe-inline';
+ script-src 'unsafe-inline';">
+</head>
+
+<script>
+ window.setTimeout(function() {
+ document.getElementsByTagName('html')[0].style.display = 'block';
+ }, 5000);
+</script>
+<body style="background-color: #1f52a5;">
+<h1>Heading</h1>
+
+<p>Lorem ipsum dolor sit amet, consectetur adipiscing elit. Vivamus
+interdum maximus finibus. Ut fermentum malesuada commodo. Sed
+faucibus, sapien a mattis lobortis, magna ante efficitur mauris, quis
+sodales nibh diam nec quam. Vestibulum magna libero, tincidunt non
+erat sed, molestie pulvinar ex. Maecenas semper blandit elit, id
+suscipit nulla venenatis pretium. Integer accumsan porta felis, vitae
+placerat urna accumsan vel. Aliquam eu aliquet mi. Aenean tincidunt
+eros lectus, sit amet efficitur orci ultrices at. Morbi lobortis ex
+quis luctus rutrum. In nulla velit, elementum vitae turpis vitae,
+finibus varius massa. Morbi id libero faucibus, tempus eros et,
+ullamcorper ipsum. Sed eleifend finibus bibendum. Nullam ut nunc nec
+lacus posuere dignissim. Nunc sollicitudin vitae augue id
+vulputate. Ut ac nibh gravida, volutpat est ac, facilisis neque.</p>
+
+<p>Nam dictum leo massa, non posuere dui bibendum id. Morbi sagittis est
+non est laoreet, a sollicitudin felis aliquet. Ut cursus vel leo a
+efficitur. Proin ut pellentesque sapien, vel maximus dui. Suspendisse
+eu felis eget leo elementum efficitur. Class aptent taciti sociosqu ad
+litora torquent per conubia nostra, per inceptos himenaeos. Fusce
+lobortis velit in elit pellentesque, ut auctor ipsum dignissim. Sed
+aliquet eleifend convallis. Duis mollis, dolor sed rutrum mollis,
+augue eros dignissim erat, eu dapibus augue turpis ac sapien. Morbi at
+volutpat odio, at molestie risus. Nulla quis nulla et magna vestibulum
+euismod. Praesent suscipit quam elit, non luctus turpis rutrum
+faucibus.</p>
+
+<p>Morbi feugiat lacus rhoncus, dignissim velit nec, dignissim
+lorem. Aliquam erat volutpat. Mauris semper dictum tempus. Nulla ex
+ligula, malesuada in ornare sed, euismod vitae massa. Etiam quis erat
+quis nisl facilisis suscipit. Mauris placerat ante et auctor
+fermentum. Donec tincidunt justo sem, ullamcorper vulputate nisl
+commodo a. Vestibulum quis ex non elit porttitor semper eget quis
+tortor. Suspendisse mattis neque non elementum scelerisque. Nulla
+facilisi. Nulla non felis et justo feugiat elementum. Aenean sodales
+turpis at erat eleifend lacinia. Proin eleifend volutpat purus id
+mollis. Proin vel tellus faucibus, sagittis libero at, lobortis
+odio. Praesent quam mauris, auctor vel velit eu, convallis molestie
+nisi. Pellentesque in nunc at orci ultrices vehicula.</p>
+
+<p>Praesent nibh lectus, efficitur sed risus in, rutrum tristique
+arcu. Curabitur non efficitur elit. Phasellus eget odio iaculis,
+molestie dui eget, venenatis erat. Nulla luctus facilisis lectus, nec
+dapibus tortor rhoncus vel. Donec nec arcu elit. Nullam ut faucibus
+purus, sed ultricies diam. Pellentesque at finibus ipsum. Vestibulum
+egestas dignissim nisl, ac rhoncus risus finibus sit amet. Donec non
+feugiat ante. Donec vehicula dui a lorem imperdiet, a tempus diam
+pulvinar. Nullam congue efficitur justo, non posuere ligula sodales
+in. Ut a urna ornare, ultrices velit in, pellentesque
+lorem. Vestibulum ante ipsum primis in faucibus orci luctus et
+ultrices posuere cubilia Curae;</p>
+
+<p>Orci varius natoque penatibus et magnis dis parturient montes,
+nascetur ridiculus mus. Morbi maximus quis magna et aliquet. Nam
+bibendum fermentum tempus. Praesent iaculis tortor metus, at
+vestibulum ipsum hendrerit mattis. Proin fringilla nisl sit amet
+tincidunt blandit. Interdum et malesuada fames ac ante ipsum primis in
+faucibus. Phasellus vel lectus leo. Curabitur fringilla, arcu non
+posuere viverra, urna metus blandit augue, convallis mattis tortor dui
+vel arcu. In sit amet metus vitae ex rhoncus hendrerit.</p>
+
+<p>Lorem ipsum dolor sit amet, consectetur adipiscing elit. Vivamus
+interdum maximus finibus. Ut fermentum malesuada commodo. Sed
+faucibus, sapien a mattis lobortis, magna ante efficitur mauris, quis
+sodales nibh diam nec quam. Vestibulum magna libero, tincidunt non
+erat sed, molestie pulvinar ex. Maecenas semper blandit elit, id
+suscipit nulla venenatis pretium. Integer accumsan porta felis, vitae
+placerat urna accumsan vel. Aliquam eu aliquet mi. Aenean tincidunt
+eros lectus, sit amet efficitur orci ultrices at. Morbi lobortis ex
+quis luctus rutrum. In nulla velit, elementum vitae turpis vitae,
+finibus varius massa. Morbi id libero faucibus, tempus eros et,
+ullamcorper ipsum. Sed eleifend finibus bibendum. Nullam ut nunc nec
+lacus posuere dignissim. Nunc sollicitudin vitae augue id
+vulputate. Ut ac nibh gravida, volutpat est ac, facilisis neque.</p>
+
+<p>Nam dictum leo massa, non posuere dui bibendum id. Morbi sagittis est
+non est laoreet, a sollicitudin felis aliquet. Ut cursus vel leo a
+efficitur. Proin ut pellentesque sapien, vel maximus dui. Suspendisse
+eu felis eget leo elementum efficitur. Class aptent taciti sociosqu ad
+litora torquent per conubia nostra, per inceptos himenaeos. Fusce
+lobortis velit in elit pellentesque, ut auctor ipsum dignissim. Sed
+aliquet eleifend convallis. Duis mollis, dolor sed rutrum mollis,
+augue eros dignissim erat, eu dapibus augue turpis ac sapien. Morbi at
+volutpat odio, at molestie risus. Nulla quis nulla et magna vestibulum
+euismod. Praesent suscipit quam elit, non luctus turpis rutrum
+faucibus.</p>
+
+<p>Morbi feugiat lacus rhoncus, dignissim velit nec, dignissim
+lorem. Aliquam erat volutpat. Mauris semper dictum tempus. Nulla ex
+ligula, malesuada in ornare sed, euismod vitae massa. Etiam quis erat
+quis nisl facilisis suscipit. Mauris placerat ante et auctor
+fermentum. Donec tincidunt justo sem, ullamcorper vulputate nisl
+commodo a. Vestibulum quis ex non elit porttitor semper eget quis
+tortor. Suspendisse mattis neque non elementum scelerisque. Nulla
+facilisi. Nulla non felis et justo feugiat elementum. Aenean sodales
+turpis at erat eleifend lacinia. Proin eleifend volutpat purus id
+mollis. Proin vel tellus faucibus, sagittis libero at, lobortis
+odio. Praesent quam mauris, auctor vel velit eu, convallis molestie
+nisi. Pellentesque in nunc at orci ultrices vehicula.</p>
+
+<p>Praesent nibh lectus, efficitur sed risus in, rutrum tristique
+arcu. Curabitur non efficitur elit. Phasellus eget odio iaculis,
+molestie dui eget, venenatis erat. Nulla luctus facilisis lectus, nec
+dapibus tortor rhoncus vel. Donec nec arcu elit. Nullam ut faucibus
+purus, sed ultricies diam. Pellentesque at finibus ipsum. Vestibulum
+egestas dignissim nisl, ac rhoncus risus finibus sit amet. Donec non
+feugiat ante. Donec vehicula dui a lorem imperdiet, a tempus diam
+pulvinar. Nullam congue efficitur justo, non posuere ligula sodales
+in. Ut a urna ornare, ultrices velit in, pellentesque
+lorem. Vestibulum ante ipsum primis in faucibus orci luctus et
+ultrices posuere cubilia Curae;</p>
+
+<p>Orci varius natoque penatibus et magnis dis parturient montes,
+nascetur ridiculus mus. Morbi maximus quis magna et aliquet. Nam
+bibendum fermentum tempus. Praesent iaculis tortor metus, at
+vestibulum ipsum hendrerit mattis. Proin fringilla nisl sit amet
+tincidunt blandit. Interdum et malesuada fames ac ante ipsum primis in
+faucibus. Phasellus vel lectus leo. Curabitur fringilla, arcu non
+posuere viverra, urna metus blandit augue, convallis mattis tortor dui
+vel arcu. In sit amet metus vitae ex rhoncus hendrerit.</p>
+
+<p>Lorem ipsum dolor sit amet, consectetur adipiscing elit. Vivamus
+interdum maximus finibus. Ut fermentum malesuada commodo. Sed
+faucibus, sapien a mattis lobortis, magna ante efficitur mauris, quis
+sodales nibh diam nec quam. Vestibulum magna libero, tincidunt non
+erat sed, molestie pulvinar ex. Maecenas semper blandit elit, id
+suscipit nulla venenatis pretium. Integer accumsan porta felis, vitae
+placerat urna accumsan vel. Aliquam eu aliquet mi. Aenean tincidunt
+eros lectus, sit amet efficitur orci ultrices at. Morbi lobortis ex
+quis luctus rutrum. In nulla velit, elementum vitae turpis vitae,
+finibus varius massa. Morbi id libero faucibus, tempus eros et,
+ullamcorper ipsum. Sed eleifend finibus bibendum. Nullam ut nunc nec
+lacus posuere dignissim. Nunc sollicitudin vitae augue id
+vulputate. Ut ac nibh gravida, volutpat est ac, facilisis neque.</p>
+
+<p>Nam dictum leo massa, non posuere dui bibendum id. Morbi sagittis est
+non est laoreet, a sollicitudin felis aliquet. Ut cursus vel leo a
+efficitur. Proin ut pellentesque sapien, vel maximus dui. Suspendisse
+eu felis eget leo elementum efficitur. Class aptent taciti sociosqu ad
+litora torquent per conubia nostra, per inceptos himenaeos. Fusce
+lobortis velit in elit pellentesque, ut auctor ipsum dignissim. Sed
+aliquet eleifend convallis. Duis mollis, dolor sed rutrum mollis,
+augue eros dignissim erat, eu dapibus augue turpis ac sapien. Morbi at
+volutpat odio, at molestie risus. Nulla quis nulla et magna vestibulum
+euismod. Praesent suscipit quam elit, non luctus turpis rutrum
+faucibus.</p>
+
+<p>Morbi feugiat lacus rhoncus, dignissim velit nec, dignissim
+lorem. Aliquam erat volutpat. Mauris semper dictum tempus. Nulla ex
+ligula, malesuada in ornare sed, euismod vitae massa. Etiam quis erat
+quis nisl facilisis suscipit. Mauris placerat ante et auctor
+fermentum. Donec tincidunt justo sem, ullamcorper vulputate nisl
+commodo a. Vestibulum quis ex non elit porttitor semper eget quis
+tortor. Suspendisse mattis neque non elementum scelerisque. Nulla
+facilisi. Nulla non felis et justo feugiat elementum. Aenean sodales
+turpis at erat eleifend lacinia. Proin eleifend volutpat purus id
+mollis. Proin vel tellus faucibus, sagittis libero at, lobortis
+odio. Praesent quam mauris, auctor vel velit eu, convallis molestie
+nisi. Pellentesque in nunc at orci ultrices vehicula.</p>
+
+<p>Praesent nibh lectus, efficitur sed risus in, rutrum tristique
+arcu. Curabitur non efficitur elit. Phasellus eget odio iaculis,
+molestie dui eget, venenatis erat. Nulla luctus facilisis lectus, nec
+dapibus tortor rhoncus vel. Donec nec arcu elit. Nullam ut faucibus
+purus, sed ultricies diam. Pellentesque at finibus ipsum. Vestibulum
+egestas dignissim nisl, ac rhoncus risus finibus sit amet. Donec non
+feugiat ante. Donec vehicula dui a lorem imperdiet, a tempus diam
+pulvinar. Nullam congue efficitur justo, non posuere ligula sodales
+in. Ut a urna ornare, ultrices velit in, pellentesque
+lorem. Vestibulum ante ipsum primis in faucibus orci luctus et
+ultrices posuere cubilia Curae;</p>
+
+<p>Orci varius natoque penatibus et magnis dis parturient montes,
+nascetur ridiculus mus. Morbi maximus quis magna et aliquet. Nam
+bibendum fermentum tempus. Praesent iaculis tortor metus, at
+vestibulum ipsum hendrerit mattis. Proin fringilla nisl sit amet
+tincidunt blandit. Interdum et malesuada fames ac ante ipsum primis in
+faucibus. Phasellus vel lectus leo. Curabitur fringilla, arcu non
+posuere viverra, urna metus blandit augue, convallis mattis tortor dui
+vel arcu. In sit amet metus vitae ex rhoncus hendrerit.</p>
+
+<p>Lorem ipsum dolor sit amet, consectetur adipiscing elit. Vivamus
+interdum maximus finibus. Ut fermentum malesuada commodo. Sed
+faucibus, sapien a mattis lobortis, magna ante efficitur mauris, quis
+sodales nibh diam nec quam. Vestibulum magna libero, tincidunt non
+erat sed, molestie pulvinar ex. Maecenas semper blandit elit, id
+suscipit nulla venenatis pretium. Integer accumsan porta felis, vitae
+placerat urna accumsan vel. Aliquam eu aliquet mi. Aenean tincidunt
+eros lectus, sit amet efficitur orci ultrices at. Morbi lobortis ex
+quis luctus rutrum. In nulla velit, elementum vitae turpis vitae,
+finibus varius massa. Morbi id libero faucibus, tempus eros et,
+ullamcorper ipsum. Sed eleifend finibus bibendum. Nullam ut nunc nec
+lacus posuere dignissim. Nunc sollicitudin vitae augue id
+vulputate. Ut ac nibh gravida, volutpat est ac, facilisis neque.</p>
+
+<p>Nam dictum leo massa, non posuere dui bibendum id. Morbi sagittis est
+non est laoreet, a sollicitudin felis aliquet. Ut cursus vel leo a
+efficitur. Proin ut pellentesque sapien, vel maximus dui. Suspendisse
+eu felis eget leo elementum efficitur. Class aptent taciti sociosqu ad
+litora torquent per conubia nostra, per inceptos himenaeos. Fusce
+lobortis velit in elit pellentesque, ut auctor ipsum dignissim. Sed
+aliquet eleifend convallis. Duis mollis, dolor sed rutrum mollis,
+augue eros dignissim erat, eu dapibus augue turpis ac sapien. Morbi at
+volutpat odio, at molestie risus. Nulla quis nulla et magna vestibulum
+euismod. Praesent suscipit quam elit, non luctus turpis rutrum
+faucibus.</p>
+
+<p>Morbi feugiat lacus rhoncus, dignissim velit nec, dignissim
+lorem. Aliquam erat volutpat. Mauris semper dictum tempus. Nulla ex
+ligula, malesuada in ornare sed, euismod vitae massa. Etiam quis erat
+quis nisl facilisis suscipit. Mauris placerat ante et auctor
+fermentum. Donec tincidunt justo sem, ullamcorper vulputate nisl
+commodo a. Vestibulum quis ex non elit porttitor semper eget quis
+tortor. Suspendisse mattis neque non elementum scelerisque. Nulla
+facilisi. Nulla non felis et justo feugiat elementum. Aenean sodales
+turpis at erat eleifend lacinia. Proin eleifend volutpat purus id
+mollis. Proin vel tellus faucibus, sagittis libero at, lobortis
+odio. Praesent quam mauris, auctor vel velit eu, convallis molestie
+nisi. Pellentesque in nunc at orci ultrices vehicula.</p>
+
+<p>Praesent nibh lectus, efficitur sed risus in, rutrum tristique
+arcu. Curabitur non efficitur elit. Phasellus eget odio iaculis,
+molestie dui eget, venenatis erat. Nulla luctus facilisis lectus, nec
+dapibus tortor rhoncus vel. Donec nec arcu elit. Nullam ut faucibus
+purus, sed ultricies diam. Pellentesque at finibus ipsum. Vestibulum
+egestas dignissim nisl, ac rhoncus risus finibus sit amet. Donec non
+feugiat ante. Donec vehicula dui a lorem imperdiet, a tempus diam
+pulvinar. Nullam congue efficitur justo, non posuere ligula sodales
+in. Ut a urna ornare, ultrices velit in, pellentesque
+lorem. Vestibulum ante ipsum primis in faucibus orci luctus et
+ultrices posuere cubilia Curae;</p>
+
+<p>Orci varius natoque penatibus et magnis dis parturient montes,
+nascetur ridiculus mus. Morbi maximus quis magna et aliquet. Nam
+bibendum fermentum tempus. Praesent iaculis tortor metus, at
+vestibulum ipsum hendrerit mattis. Proin fringilla nisl sit amet
+tincidunt blandit. Interdum et malesuada fames ac ante ipsum primis in
+faucibus. Phasellus vel lectus leo. Curabitur fringilla, arcu non
+posuere viverra, urna metus blandit augue, convallis mattis tortor dui
+vel arcu. In sit amet metus vitae ex rhoncus hendrerit.</p>
+
+<p>Lorem ipsum dolor sit amet, consectetur adipiscing elit. Vivamus
+interdum maximus finibus. Ut fermentum malesuada commodo. Sed
+faucibus, sapien a mattis lobortis, magna ante efficitur mauris, quis
+sodales nibh diam nec quam. Vestibulum magna libero, tincidunt non
+erat sed, molestie pulvinar ex. Maecenas semper blandit elit, id
+suscipit nulla venenatis pretium. Integer accumsan porta felis, vitae
+placerat urna accumsan vel. Aliquam eu aliquet mi. Aenean tincidunt
+eros lectus, sit amet efficitur orci ultrices at. Morbi lobortis ex
+quis luctus rutrum. In nulla velit, elementum vitae turpis vitae,
+finibus varius massa. Morbi id libero faucibus, tempus eros et,
+ullamcorper ipsum. Sed eleifend finibus bibendum. Nullam ut nunc nec
+lacus posuere dignissim. Nunc sollicitudin vitae augue id
+vulputate. Ut ac nibh gravida, volutpat est ac, facilisis neque.</p>
+
+<p>Nam dictum leo massa, non posuere dui bibendum id. Morbi sagittis est
+non est laoreet, a sollicitudin felis aliquet. Ut cursus vel leo a
+efficitur. Proin ut pellentesque sapien, vel maximus dui. Suspendisse
+eu felis eget leo elementum efficitur. Class aptent taciti sociosqu ad
+litora torquent per conubia nostra, per inceptos himenaeos. Fusce
+lobortis velit in elit pellentesque, ut auctor ipsum dignissim. Sed
+aliquet eleifend convallis. Duis mollis, dolor sed rutrum mollis,
+augue eros dignissim erat, eu dapibus augue turpis ac sapien. Morbi at
+volutpat odio, at molestie risus. Nulla quis nulla et magna vestibulum
+euismod. Praesent suscipit quam elit, non luctus turpis rutrum
+faucibus.</p>
+
+<p>Morbi feugiat lacus rhoncus, dignissim velit nec, dignissim
+lorem. Aliquam erat volutpat. Mauris semper dictum tempus. Nulla ex
+ligula, malesuada in ornare sed, euismod vitae massa. Etiam quis erat
+quis nisl facilisis suscipit. Mauris placerat ante et auctor
+fermentum. Donec tincidunt justo sem, ullamcorper vulputate nisl
+commodo a. Vestibulum quis ex non elit porttitor semper eget quis
+tortor. Suspendisse mattis neque non elementum scelerisque. Nulla
+facilisi. Nulla non felis et justo feugiat elementum. Aenean sodales
+turpis at erat eleifend lacinia. Proin eleifend volutpat purus id
+mollis. Proin vel tellus faucibus, sagittis libero at, lobortis
+odio. Praesent quam mauris, auctor vel velit eu, convallis molestie
+nisi. Pellentesque in nunc at orci ultrices vehicula.</p>
+
+<p>Praesent nibh lectus, efficitur sed risus in, rutrum tristique
+arcu. Curabitur non efficitur elit. Phasellus eget odio iaculis,
+molestie dui eget, venenatis erat. Nulla luctus facilisis lectus, nec
+dapibus tortor rhoncus vel. Donec nec arcu elit. Nullam ut faucibus
+purus, sed ultricies diam. Pellentesque at finibus ipsum. Vestibulum
+egestas dignissim nisl, ac rhoncus risus finibus sit amet. Donec non
+feugiat ante. Donec vehicula dui a lorem imperdiet, a tempus diam
+pulvinar. Nullam congue efficitur justo, non posuere ligula sodales
+in. Ut a urna ornare, ultrices velit in, pellentesque
+lorem. Vestibulum ante ipsum primis in faucibus orci luctus et
+ultrices posuere cubilia Curae;</p>
+
+<p>Orci varius natoque penatibus et magnis dis parturient montes,
+nascetur ridiculus mus. Morbi maximus quis magna et aliquet. Nam
+bibendum fermentum tempus. Praesent iaculis tortor metus, at
+vestibulum ipsum hendrerit mattis. Proin fringilla nisl sit amet
+tincidunt blandit. Interdum et malesuada fames ac ante ipsum primis in
+faucibus. Phasellus vel lectus leo. Curabitur fringilla, arcu non
+posuere viverra, urna metus blandit augue, convallis mattis tortor dui
+vel arcu. In sit amet metus vitae ex rhoncus hendrerit.</p>
+
+<p>Lorem ipsum dolor sit amet, consectetur adipiscing elit. Vivamus
+interdum maximus finibus. Ut fermentum malesuada commodo. Sed
+faucibus, sapien a mattis lobortis, magna ante efficitur mauris, quis
+sodales nibh diam nec quam. Vestibulum magna libero, tincidunt non
+erat sed, molestie pulvinar ex. Maecenas semper blandit elit, id
+suscipit nulla venenatis pretium. Integer accumsan porta felis, vitae
+placerat urna accumsan vel. Aliquam eu aliquet mi. Aenean tincidunt
+eros lectus, sit amet efficitur orci ultrices at. Morbi lobortis ex
+quis luctus rutrum. In nulla velit, elementum vitae turpis vitae,
+finibus varius massa. Morbi id libero faucibus, tempus eros et,
+ullamcorper ipsum. Sed eleifend finibus bibendum. Nullam ut nunc nec
+lacus posuere dignissim. Nunc sollicitudin vitae augue id
+vulputate. Ut ac nibh gravida, volutpat est ac, facilisis neque.</p>
+
+<p>Nam dictum leo massa, non posuere dui bibendum id. Morbi sagittis est
+non est laoreet, a sollicitudin felis aliquet. Ut cursus vel leo a
+efficitur. Proin ut pellentesque sapien, vel maximus dui. Suspendisse
+eu felis eget leo elementum efficitur. Class aptent taciti sociosqu ad
+litora torquent per conubia nostra, per inceptos himenaeos. Fusce
+lobortis velit in elit pellentesque, ut auctor ipsum dignissim. Sed
+aliquet eleifend convallis. Duis mollis, dolor sed rutrum mollis,
+augue eros dignissim erat, eu dapibus augue turpis ac sapien. Morbi at
+volutpat odio, at molestie risus. Nulla quis nulla et magna vestibulum
+euismod. Praesent suscipit quam elit, non luctus turpis rutrum
+faucibus.</p>
+
+<p>Morbi feugiat lacus rhoncus, dignissim velit nec, dignissim
+lorem. Aliquam erat volutpat. Mauris semper dictum tempus. Nulla ex
+ligula, malesuada in ornare sed, euismod vitae massa. Etiam quis erat
+quis nisl facilisis suscipit. Mauris placerat ante et auctor
+fermentum. Donec tincidunt justo sem, ullamcorper vulputate nisl
+commodo a. Vestibulum quis ex non elit porttitor semper eget quis
+tortor. Suspendisse mattis neque non elementum scelerisque. Nulla
+facilisi. Nulla non felis et justo feugiat elementum. Aenean sodales
+turpis at erat eleifend lacinia. Proin eleifend volutpat purus id
+mollis. Proin vel tellus faucibus, sagittis libero at, lobortis
+odio. Praesent quam mauris, auctor vel velit eu, convallis molestie
+nisi. Pellentesque in nunc at orci ultrices vehicula.</p>
+
+<p>Praesent nibh lectus, efficitur sed risus in, rutrum tristique
+arcu. Curabitur non efficitur elit. Phasellus eget odio iaculis,
+molestie dui eget, venenatis erat. Nulla luctus facilisis lectus, nec
+dapibus tortor rhoncus vel. Donec nec arcu elit. Nullam ut faucibus
+purus, sed ultricies diam. Pellentesque at finibus ipsum. Vestibulum
+egestas dignissim nisl, ac rhoncus risus finibus sit amet. Donec non
+feugiat ante. Donec vehicula dui a lorem imperdiet, a tempus diam
+pulvinar. Nullam congue efficitur justo, non posuere ligula sodales
+in. Ut a urna ornare, ultrices velit in, pellentesque
+lorem. Vestibulum ante ipsum primis in faucibus orci luctus et
+ultrices posuere cubilia Curae;</p>
+
+<p>Orci varius natoque penatibus et magnis dis parturient montes,
+nascetur ridiculus mus. Morbi maximus quis magna et aliquet. Nam
+bibendum fermentum tempus. Praesent iaculis tortor metus, at
+vestibulum ipsum hendrerit mattis. Proin fringilla nisl sit amet
+tincidunt blandit. Interdum et malesuada fames ac ante ipsum primis in
+faucibus. Phasellus vel lectus leo. Curabitur fringilla, arcu non
+posuere viverra, urna metus blandit augue, convallis mattis tortor dui
+vel arcu. In sit amet metus vitae ex rhoncus hendrerit.</p>
+
+<p>Lorem ipsum dolor sit amet, consectetur adipiscing elit. Vivamus
+interdum maximus finibus. Ut fermentum malesuada commodo. Sed
+faucibus, sapien a mattis lobortis, magna ante efficitur mauris, quis
+sodales nibh diam nec quam. Vestibulum magna libero, tincidunt non
+erat sed, molestie pulvinar ex. Maecenas semper blandit elit, id
+suscipit nulla venenatis pretium. Integer accumsan porta felis, vitae
+placerat urna accumsan vel. Aliquam eu aliquet mi. Aenean tincidunt
+eros lectus, sit amet efficitur orci ultrices at. Morbi lobortis ex
+quis luctus rutrum. In nulla velit, elementum vitae turpis vitae,
+finibus varius massa. Morbi id libero faucibus, tempus eros et,
+ullamcorper ipsum. Sed eleifend finibus bibendum. Nullam ut nunc nec
+lacus posuere dignissim. Nunc sollicitudin vitae augue id
+vulputate. Ut ac nibh gravida, volutpat est ac, facilisis neque.</p>
+
+<p>Nam dictum leo massa, non posuere dui bibendum id. Morbi sagittis est
+non est laoreet, a sollicitudin felis aliquet. Ut cursus vel leo a
+efficitur. Proin ut pellentesque sapien, vel maximus dui. Suspendisse
+eu felis eget leo elementum efficitur. Class aptent taciti sociosqu ad
+litora torquent per conubia nostra, per inceptos himenaeos. Fusce
+lobortis velit in elit pellentesque, ut auctor ipsum dignissim. Sed
+aliquet eleifend convallis. Duis mollis, dolor sed rutrum mollis,
+augue eros dignissim erat, eu dapibus augue turpis ac sapien. Morbi at
+volutpat odio, at molestie risus. Nulla quis nulla et magna vestibulum
+euismod. Praesent suscipit quam elit, non luctus turpis rutrum
+faucibus.</p>
+
+<p>Morbi feugiat lacus rhoncus, dignissim velit nec, dignissim
+lorem. Aliquam erat volutpat. Mauris semper dictum tempus. Nulla ex
+ligula, malesuada in ornare sed, euismod vitae massa. Etiam quis erat
+quis nisl facilisis suscipit. Mauris placerat ante et auctor
+fermentum. Donec tincidunt justo sem, ullamcorper vulputate nisl
+commodo a. Vestibulum quis ex non elit porttitor semper eget quis
+tortor. Suspendisse mattis neque non elementum scelerisque. Nulla
+facilisi. Nulla non felis et justo feugiat elementum. Aenean sodales
+turpis at erat eleifend lacinia. Proin eleifend volutpat purus id
+mollis. Proin vel tellus faucibus, sagittis libero at, lobortis
+odio. Praesent quam mauris, auctor vel velit eu, convallis molestie
+nisi. Pellentesque in nunc at orci ultrices vehicula.</p>
+
+<p>Praesent nibh lectus, efficitur sed risus in, rutrum tristique
+arcu. Curabitur non efficitur elit. Phasellus eget odio iaculis,
+molestie dui eget, venenatis erat. Nulla luctus facilisis lectus, nec
+dapibus tortor rhoncus vel. Donec nec arcu elit. Nullam ut faucibus
+purus, sed ultricies diam. Pellentesque at finibus ipsum. Vestibulum
+egestas dignissim nisl, ac rhoncus risus finibus sit amet. Donec non
+feugiat ante. Donec vehicula dui a lorem imperdiet, a tempus diam
+pulvinar. Nullam congue efficitur justo, non posuere ligula sodales
+in. Ut a urna ornare, ultrices velit in, pellentesque
+lorem. Vestibulum ante ipsum primis in faucibus orci luctus et
+ultrices posuere cubilia Curae;</p>
+
+<p>Orci varius natoque penatibus et magnis dis parturient montes,
+nascetur ridiculus mus. Morbi maximus quis magna et aliquet. Nam
+bibendum fermentum tempus. Praesent iaculis tortor metus, at
+vestibulum ipsum hendrerit mattis. Proin fringilla nisl sit amet
+tincidunt blandit. Interdum et malesuada fames ac ante ipsum primis in
+faucibus. Phasellus vel lectus leo. Curabitur fringilla, arcu non
+posuere viverra, urna metus blandit augue, convallis mattis tortor dui
+vel arcu. In sit amet metus vitae ex rhoncus hendrerit.</p>
+
+</body>
+</html>
diff --git a/src/cobalt/browser/web_module.cc b/src/cobalt/browser/web_module.cc
index 87fc9f2..729697f 100644
--- a/src/cobalt/browser/web_module.cc
+++ b/src/cobalt/browser/web_module.cc
@@ -54,6 +54,7 @@
#include "cobalt/loader/image/animated_image_tracker.h"
#include "cobalt/media_session/media_session_client.h"
#include "cobalt/page_visibility/visibility_state.h"
+#include "cobalt/script/error_report.h"
#include "cobalt/script/javascript_engine.h"
#include "cobalt/storage/storage_manager.h"
#include "starboard/accessibility.h"
@@ -143,6 +144,12 @@
void InjectWheelEvent(scoped_refptr<dom::Element> element, base::Token type,
const dom::WheelEventInit& event);
+ // Called to inject a beforeunload event into the web module. If
+ // this event is not handled by the web application,
+ // on_before_unload_fired_but_not_handled will be called. The event
+ // is not directed at a specific element.
+ void InjectBeforeUnloadEvent();
+
// Called to execute JavaScript in this WebModule. Sets the |result|
// output parameter and signals |got_result|.
void ExecuteJavascript(const std::string& script_utf8,
@@ -197,8 +204,10 @@
void Unpause();
void Resume(render_tree::ResourceProvider* resource_provider);
- void ReportScriptError(const base::SourceLocation& source_location,
- const std::string& error_message);
+ void ReduceMemory();
+
+ void LogScriptError(const base::SourceLocation& source_location,
+ const std::string& error_message);
private:
class DocumentLoadedObserver;
@@ -233,6 +242,10 @@
error_callback_.Run(window_->location()->url(), error);
}
+ // Report an error encountered while running JS.
+ // Returns whether or not the error was handled.
+ bool ReportScriptError(const script::ErrorReport& error_report);
+
// Inject the DOM event object into the window or the element.
void InjectInputEvent(scoped_refptr<dom::Element> element,
const scoped_refptr<dom::Event>& event);
@@ -358,6 +371,8 @@
scoped_ptr<media_session::MediaSessionClient> media_session_client_;
scoped_ptr<layout::TopmostEventTarget> topmost_event_target_;
+
+ base::Closure on_before_unload_fired_but_not_handled;
};
class WebModule::Impl::DocumentLoadedObserver : public dom::DocumentObserver {
@@ -422,6 +437,9 @@
base::Unretained(data.options.splash_screen_cache));
}
+ on_before_unload_fired_but_not_handled =
+ data.options.on_before_unload_fired_but_not_handled;
+
fetcher_factory_.reset(new loader::FetcherFactory(
data.network_module, data.options.extra_web_file_dir,
dom::URL::MakeBlobResolverCallback(blob_registry_.get()),
@@ -475,7 +493,7 @@
#if defined(COBALT_ENABLE_JAVASCRIPT_ERROR_LOGGING)
script::JavaScriptEngine::ErrorHandler error_handler =
- base::Bind(&WebModule::Impl::ReportScriptError, base::Unretained(this));
+ base::Bind(&WebModule::Impl::LogScriptError, base::Unretained(this));
javascript_engine_->RegisterErrorHandler(error_handler);
#endif
@@ -578,6 +596,9 @@
base::Bind(&dom::CspDelegate::ReportEval,
base::Unretained(window_->document()->csp_delegate())));
+ global_environment_->SetReportErrorCallback(
+ base::Bind(&WebModule::Impl::ReportScriptError, base::Unretained(this)));
+
InjectCustomWindowAttributes(data.options.injected_window_attributes);
if (!data.options.loaded_callbacks.empty()) {
@@ -594,6 +615,8 @@
DCHECK(is_running_);
is_running_ = false;
global_environment_->SetReportEvalCallback(base::Closure());
+ global_environment_->SetReportErrorCallback(
+ script::GlobalEnvironment::ReportErrorCallback());
window_->DispatchEvent(new dom::Event(base::Tokens::unload()));
document_load_observer_.reset();
media_session_client_.reset();
@@ -750,6 +773,14 @@
}
}
+bool WebModule::Impl::ReportScriptError(
+ const script::ErrorReport& error_report) {
+ DCHECK(thread_checker_.CalledOnValidThread());
+ DCHECK(is_running_);
+ DCHECK(window_);
+ return window_->ReportScriptError(error_report);
+}
+
#if defined(ENABLE_WEBDRIVER)
void WebModule::Impl::CreateWindowDriver(
const webdriver::protocol::WindowId& window_id,
@@ -925,7 +956,23 @@
SetApplicationState(base::kApplicationStatePaused);
}
-void WebModule::Impl::ReportScriptError(
+void WebModule::Impl::ReduceMemory() {
+ TRACE_EVENT0("cobalt::browser", "WebModule::Impl::ReduceMemory()");
+ DCHECK(thread_checker_.CalledOnValidThread());
+ if (!is_running_) {
+ return;
+ }
+
+ PurgeResourceCaches();
+ window_->document()->PurgeCachedResources();
+
+ // Force garbage collection in |javascript_engine_|.
+ if (javascript_engine_) {
+ javascript_engine_->CollectGarbage();
+ }
+}
+
+void WebModule::Impl::LogScriptError(
const base::SourceLocation& source_location,
const std::string& error_message) {
std::string file_name =
@@ -944,6 +991,15 @@
SbLogRaw(ss.str().c_str());
}
+void WebModule::Impl::InjectBeforeUnloadEvent() {
+ DCHECK(thread_checker_.CalledOnValidThread());
+ if (window_ && window_->HasEventListener(base::Tokens::beforeunload())) {
+ window_->DispatchEvent(new dom::Event(base::Tokens::beforeunload()));
+ } else if (!on_before_unload_fired_but_not_handled.is_null()) {
+ on_before_unload_fired_but_not_handled.Run();
+ }
+}
+
void WebModule::Impl::PurgeResourceCaches() {
image_cache_->Purge();
remote_typeface_cache_->Purge();
@@ -1097,6 +1153,15 @@
scoped_refptr<dom::Element>(), type, event));
}
+void WebModule::InjectBeforeUnloadEvent() {
+ TRACE_EVENT0("cobalt::browser", "WebModule::InjectBeforeUnloadEvent()");
+ DCHECK(message_loop());
+ DCHECK(impl_);
+ message_loop()->PostTask(FROM_HERE,
+ base::Bind(&WebModule::Impl::InjectBeforeUnloadEvent,
+ base::Unretained(impl_.get())));
+}
+
std::string WebModule::ExecuteJavascript(
const std::string& script_utf8,
const base::SourceLocation& script_location,
@@ -1287,6 +1352,17 @@
base::Unretained(impl_.get()), resource_provider));
}
+void WebModule::ReduceMemory() {
+ // Must only be called by a thread external from the WebModule thread.
+ DCHECK_NE(MessageLoop::current(), message_loop());
+
+ // Block here so that the low-memory event handler does not return until
+ // memory consumption has actually been reduced.
+ message_loop()->PostBlockingTask(
+ FROM_HERE, base::Bind(&WebModule::Impl::ReduceMemory,
+ base::Unretained(impl_.get())));
+}
+
void WebModule::Impl::HandlePointerEvents() {
TRACE_EVENT0("cobalt::browser", "WebModule::Impl::HandlePointerEvents");
const scoped_refptr<dom::Document>& document = window_->document();
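
Note on the ReduceMemory() path above: a minimal sketch, under assumptions, of how an embedder might drive it from a low-memory notification. BrowserModule::OnLowMemory() and the web_module_ member are hypothetical names used only for illustration; only WebModule::ReduceMemory() itself is introduced by this change.

  // Must run on a thread other than the WebModule thread; ReduceMemory()
  // DCHECKs this and then posts a blocking task to that thread, which
  // purges the resource caches, purges the document's cached resources,
  // and forces a JavaScript garbage collection before returning.
  void BrowserModule::OnLowMemory() {
    if (web_module_) {
      web_module_->ReduceMemory();
    }
  }
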
diff --git a/src/cobalt/browser/web_module.h b/src/cobalt/browser/web_module.h
index b296d6f..ba23707 100644
--- a/src/cobalt/browser/web_module.h
+++ b/src/cobalt/browser/web_module.h
@@ -185,6 +185,14 @@
// The splash screen cache object, owned by the BrowserModule.
SplashScreenCache* splash_screen_cache;
+ // The beforeunload event gives a web page a chance to shut itself
+ // down softly and ultimately call window.close(). If the web
+ // application does not handle the event, this callback is run
+ // instead, so the embedder knows that a beforeunload event was
+ // generated but that no window.close() call is pending.
+ base::Closure on_before_unload_fired_but_not_handled;
+
// Whether or not the WebModule is allowed to fetch from cache via
// h5vcc-cache://.
bool can_fetch_cache;
@@ -222,6 +230,11 @@
// represents the event name, for example 'wheel'.
void InjectWheelEvent(base::Token type, const dom::WheelEventInit& event);
+ // Call this to inject a beforeunload event into the web module. If
+ // this event is not handled by the web application,
+ // on_before_unload_fired_but_not_handled will be called.
+ void InjectBeforeUnloadEvent();
+
// Call this to execute Javascript code in this web module. The calling
// thread will block until the JavaScript has executed and the output results
// are available.
@@ -262,6 +275,10 @@
void Suspend() OVERRIDE;
void Resume(render_tree::ResourceProvider* resource_provider) OVERRIDE;
+ // Attempt to reduce overall memory consumption. Called in response to a
+ // system indication that memory usage is nearing a critical level.
+ void ReduceMemory();
+
private:
// Data required to construct a WebModule, initialized in the constructor and
// passed to |Initialize|.
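
A minimal sketch, under assumptions, of how the new beforeunload plumbing could be wired up. The Options field and WebModule::InjectBeforeUnloadEvent() come from this change; BrowserModule::OnBeforeUnloadNotHandled() and the surrounding members are hypothetical.

  // At WebModule construction time, register a callback for the case
  // where the page installs no beforeunload listener.
  WebModule::Options options;
  options.on_before_unload_fired_but_not_handled =
      base::Bind(&BrowserModule::OnBeforeUnloadNotHandled,
                 base::Unretained(this));

  // Later, e.g. when the platform requests an exit, give the page a
  // chance to shut itself down softly and eventually call window.close():
  web_module_->InjectBeforeUnloadEvent();
  // If the page has no beforeunload listener, the callback registered
  // above runs instead, signalling that no window.close() is pending.
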
diff --git a/src/cobalt/build/build.id b/src/cobalt/build/build.id
index 3c54c85..248bc70 100644
--- a/src/cobalt/build/build.id
+++ b/src/cobalt/build/build.id
@@ -1 +1 @@
-90790
\ No newline at end of file
+93830
\ No newline at end of file
diff --git a/src/cobalt/build/config/base.gypi b/src/cobalt/build/config/base.gypi
index a3243c1..02c5ec2 100644
--- a/src/cobalt/build/config/base.gypi
+++ b/src/cobalt/build/config/base.gypi
@@ -790,10 +790,9 @@
# Clients must copy over all content; to avoid having to copy over extra data, we
# omit the test data
'conditions': [
- ['cobalt_config != "gold" and cobalt_enable_lib == 0', {
+ ['cobalt_config != "gold"', {
'variables' : {
'cobalt_copy_debug_console': 1,
- 'cobalt_copy_test_data': 1,
'enable_about_scheme': 1,
'enable_fake_microphone': 1,
'enable_file_scheme': 1,
@@ -807,7 +806,6 @@
{
'variables' : {
'cobalt_copy_debug_console': 0,
- 'cobalt_copy_test_data': 0,
'enable_about_scheme': 0,
'enable_fake_microphone': 0,
'enable_file_scheme': 0,
@@ -818,5 +816,15 @@
'sb_allows_memory_tracking': 0,
},
}],
+ ['cobalt_config != "gold" and cobalt_enable_lib == 0', {
+ 'variables' : {
+ 'cobalt_copy_test_data': 1,
+ },
+ },
+ {
+ 'variables' : {
+ 'cobalt_copy_test_data': 0,
+ },
+ }],
],
}
diff --git a/src/cobalt/build/config/starboard.py b/src/cobalt/build/config/starboard.py
index ecba441..2af50cd 100644
--- a/src/cobalt/build/config/starboard.py
+++ b/src/cobalt/build/config/starboard.py
@@ -81,9 +81,6 @@
# Cobalt uses OpenSSL on all platforms.
'use_openssl': 1,
'clang': use_clang,
- # Cobalt relies on the Starboard implementation for DRM on all Starboard
- # platforms.
- 'use_widevine': 0,
# Whether to build with clang's Address Sanitizer instrumentation.
'use_asan': use_asan,
# Whether to build with clang's Thread Sanitizer instrumentation.
diff --git a/src/cobalt/debug/debug_web_server.cc b/src/cobalt/debug/debug_web_server.cc
index 6ffd152..baa0fb9 100644
--- a/src/cobalt/debug/debug_web_server.cc
+++ b/src/cobalt/debug/debug_web_server.cc
@@ -30,12 +30,7 @@
#include "net/base/net_errors.h"
#include "net/base/tcp_listen_socket.h"
#include "net/server/http_server_request_info.h"
-
-#if defined(__LB_SHELL__)
-#include "lb_network_helpers.h" // NOLINT[build/include]
-#elif defined(OS_STARBOARD)
#include "starboard/socket.h"
-#endif
namespace cobalt {
namespace debug {
@@ -84,7 +79,6 @@
net::IPEndPoint ip_addr;
SbSocketAddress local_ip;
SbMemorySet(&local_ip, 0, sizeof(local_ip));
-#if SB_API_VERSION >= 4
bool result = false;
// Prefer IPv4 addresses, as they're easier to type for debugging.
@@ -109,19 +103,6 @@
DLOG(WARNING) << "Unable to get a local interface address.";
return base::nullopt;
}
-#else
- bool result = SbSocketGetLocalInterfaceAddress(&local_ip);
- if (!result) {
- DLOG(WARNING) << "Unable to get a local interface address.";
- return base::nullopt;
- }
-
- result = ip_addr.FromSbSocketAddress(&local_ip);
- if (!result) {
- LOG(WARNING) << "Got invalid local interface address.";
- return base::nullopt;
- }
-#endif // SB_API_VERSION >= 4
return ip_addr.ToStringWithoutPort();
}
diff --git a/src/cobalt/dom/custom_event.h b/src/cobalt/dom/custom_event.h
new file mode 100644
index 0000000..615eb6a
--- /dev/null
+++ b/src/cobalt/dom/custom_event.h
@@ -0,0 +1,76 @@
+// Copyright 2017 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef COBALT_DOM_CUSTOM_EVENT_H_
+#define COBALT_DOM_CUSTOM_EVENT_H_
+
+#include <string>
+
+#include "cobalt/dom/custom_event_init.h"
+#include "cobalt/dom/event.h"
+#include "cobalt/script/value_handle.h"
+
+namespace cobalt {
+namespace dom {
+
+// Events using the CustomEvent interface can be used to carry custom data.
+// https://www.w3.org/TR/2015/REC-dom-20151119/#customevent
+class CustomEvent : public Event {
+ public:
+ explicit CustomEvent(const std::string& type) : Event(type) {}
+ CustomEvent(const std::string& type, const CustomEventInit& init_dict)
+ : Event(type, init_dict) {
+ set_detail(init_dict.detail());
+ }
+
+ // Creates an event with its "initialized flag" unset.
+ explicit CustomEvent(UninitializedFlag uninitialized_flag)
+ : Event(uninitialized_flag) {}
+
+ // Web API: CustomEvent
+ //
+ void InitCustomEvent(const std::string& type, bool bubbles, bool cancelable,
+ const script::ValueHandleHolder& detail) {
+ InitEvent(type, bubbles, cancelable);
+ set_detail(&detail);
+ }
+
+ void set_detail(const script::ValueHandleHolder* detail) {
+ if (detail) {
+ detail_.reset(new script::ValueHandleHolder::Reference(this, *detail));
+ } else {
+ detail_.reset();
+ }
+ }
+
+ const script::ValueHandleHolder* detail() const {
+ if (!detail_) {
+ return NULL;
+ }
+
+ return &(detail_->referenced_value());
+ }
+
+ DEFINE_WRAPPABLE_TYPE(CustomEvent);
+
+ protected:
+ ~CustomEvent() OVERRIDE {}
+
+ scoped_ptr<script::ValueHandleHolder::Reference> detail_;
+};
+
+} // namespace dom
+} // namespace cobalt
+
+#endif // COBALT_DOM_CUSTOM_EVENT_H_
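
A small native-side sketch of the new CustomEvent class; the script-facing behaviour (constructor init dictionary, detail, initCustomEvent) is exercised by custom_event_test.cc later in this change. The window pointer below is assumed for illustration.

  // Create a CustomEvent by type and dispatch it at an EventTarget such
  // as the Window. The detail payload is left unset here because a
  // script::ValueHandleHolder normally originates from script.
  scoped_refptr<dom::CustomEvent> event =
      new dom::CustomEvent("mycustomevent");
  window->DispatchEvent(event);
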
diff --git a/src/cobalt/dom/custom_event.idl b/src/cobalt/dom/custom_event.idl
new file mode 100644
index 0000000..cd4f555
--- /dev/null
+++ b/src/cobalt/dom/custom_event.idl
@@ -0,0 +1,24 @@
+// Copyright 2017 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// https://www.w3.org/TR/2015/REC-dom-20151119/#customevent
+[Constructor(DOMString type, optional CustomEventInit eventInitDict)]
+interface CustomEvent : Event {
+ readonly attribute any detail;
+
+ void initCustomEvent(DOMString type,
+ boolean bubbles,
+ boolean cancelable,
+ any detail);
+};
diff --git a/src/cobalt/dom/custom_event_init.idl b/src/cobalt/dom/custom_event_init.idl
new file mode 100644
index 0000000..2967077
--- /dev/null
+++ b/src/cobalt/dom/custom_event_init.idl
@@ -0,0 +1,19 @@
+// Copyright 2017 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// https://www.w3.org/TR/dom/#customeventinit
+
+dictionary CustomEventInit : EventInit {
+ any detail = null;
+};
diff --git a/src/cobalt/dom/custom_event_test.cc b/src/cobalt/dom/custom_event_test.cc
new file mode 100644
index 0000000..d8b4c42
--- /dev/null
+++ b/src/cobalt/dom/custom_event_test.cc
@@ -0,0 +1,198 @@
+// Copyright 2017 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "cobalt/dom/custom_event.h"
+
+#include <string>
+
+#include "base/bind.h"
+#include "base/callback.h"
+#include "base/memory/scoped_ptr.h"
+#include "cobalt/css_parser/parser.h"
+#include "cobalt/dom/custom_event_init.h"
+#include "cobalt/dom/local_storage_database.h"
+#include "cobalt/dom/testing/gtest_workarounds.h"
+#include "cobalt/dom/window.h"
+#include "cobalt/dom_parser/parser.h"
+#include "cobalt/loader/fetcher_factory.h"
+#include "cobalt/media/media_module_stub.h"
+#include "cobalt/media_session/media_session.h"
+#include "cobalt/network/network_module.h"
+#include "cobalt/script/global_environment.h"
+#include "cobalt/script/javascript_engine.h"
+#include "cobalt/script/source_code.h"
+#include "cobalt/script/testing/fake_script_value.h"
+#include "cobalt/script/value_handle.h"
+#include "cobalt/script/wrappable.h"
+#include "nb/pointer_arithmetic.h"
+#include "testing/gmock/include/gmock/gmock.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace cobalt {
+namespace dom {
+
+using ::cobalt::script::testing::FakeScriptValue;
+
+class MockErrorCallback : public base::Callback<void(const std::string&)> {
+ public:
+ MOCK_METHOD1(Run, void(const std::string&));
+};
+
+namespace {
+class CustomEventTest : public ::testing::Test {
+ public:
+ CustomEventTest()
+ : environment_settings_(new script::EnvironmentSettings),
+ message_loop_(MessageLoop::TYPE_DEFAULT),
+ css_parser_(css_parser::Parser::Create()),
+ dom_parser_(new dom_parser::Parser(mock_error_callback_)),
+ fetcher_factory_(new loader::FetcherFactory(&network_module_)),
+ local_storage_database_(NULL),
+ stub_media_module_(new media::MediaModuleStub()),
+ url_("about:blank"),
+ window_(new Window(
+ 1920, 1080, 1.f, base::kApplicationStateStarted, css_parser_.get(),
+ dom_parser_.get(), fetcher_factory_.get(), NULL, NULL, NULL, NULL,
+ NULL, NULL, &local_storage_database_, stub_media_module_.get(),
+ stub_media_module_.get(), NULL, NULL, NULL, NULL, NULL, url_, "",
+ "en-US", base::Callback<void(const GURL&)>(),
+ base::Bind(&MockErrorCallback::Run,
+ base::Unretained(&mock_error_callback_)),
+ NULL, network_bridge::PostSender(),
+ std::string() /* default security policy */, kCspEnforcementEnable,
+ base::Closure() /* csp_policy_changed */,
+ base::Closure() /* ran_animation_frame_callbacks */,
+ base::Closure() /* window_close */,
+ base::Closure() /* window_minimize */, NULL, NULL)) {
+ engine_ = script::JavaScriptEngine::CreateEngine(
+ script::JavaScriptEngine::Options());
+ global_environment_ = engine_->CreateGlobalEnvironment();
+ global_environment_->CreateGlobalObject(window_,
+ environment_settings_.get());
+ }
+
+ bool EvaluateScript(const std::string& js_code, std::string* result);
+
+ private:
+ scoped_ptr<script::JavaScriptEngine> engine_;
+ scoped_refptr<script::GlobalEnvironment> global_environment_;
+
+ const scoped_ptr<script::EnvironmentSettings> environment_settings_;
+ MessageLoop message_loop_;
+ MockErrorCallback mock_error_callback_;
+ scoped_ptr<css_parser::Parser> css_parser_;
+ scoped_ptr<dom_parser::Parser> dom_parser_;
+ network::NetworkModule network_module_;
+ scoped_ptr<loader::FetcherFactory> fetcher_factory_;
+ dom::LocalStorageDatabase local_storage_database_;
+ scoped_ptr<media::MediaModule> stub_media_module_;
+ GURL url_;
+ const scoped_refptr<Window> window_;
+};
+
+bool CustomEventTest::EvaluateScript(const std::string& js_code,
+ std::string* result) {
+ DCHECK(global_environment_);
+ DCHECK(result);
+ scoped_refptr<script::SourceCode> source_code =
+ script::SourceCode::CreateSourceCode(
+ js_code, base::SourceLocation(__FILE__, __LINE__, 1));
+
+ global_environment_->EnableEval();
+ global_environment_->SetReportEvalCallback(base::Closure());
+ bool succeeded = global_environment_->EvaluateScript(source_code, result);
+ return succeeded;
+}
+} // namespace
+
+TEST_F(CustomEventTest, ConstructorWithEventTypeString) {
+ scoped_refptr<CustomEvent> event = new CustomEvent("mytestevent");
+
+ EXPECT_EQ("mytestevent", event->type());
+ EXPECT_EQ(NULL, event->target());
+ EXPECT_EQ(NULL, event->current_target());
+ EXPECT_EQ(Event::kNone, event->event_phase());
+ EXPECT_FALSE(event->bubbles());
+ EXPECT_FALSE(event->cancelable());
+ EXPECT_FALSE(event->default_prevented());
+ EXPECT_FALSE(event->IsBeingDispatched());
+ EXPECT_FALSE(event->propagation_stopped());
+ EXPECT_FALSE(event->immediate_propagation_stopped());
+ EXPECT_EQ(NULL, event->detail());
+}
+
+TEST_F(CustomEventTest, ConstructorWithEventTypeAndDefaultInitDict) {
+ CustomEventInit init;
+ scoped_refptr<CustomEvent> event = new CustomEvent("mytestevent", init);
+
+ EXPECT_EQ("mytestevent", event->type());
+ EXPECT_EQ(NULL, event->target());
+ EXPECT_EQ(NULL, event->current_target());
+ EXPECT_EQ(Event::kNone, event->event_phase());
+ EXPECT_FALSE(event->bubbles());
+ EXPECT_FALSE(event->cancelable());
+ EXPECT_FALSE(event->default_prevented());
+ EXPECT_FALSE(event->IsBeingDispatched());
+ EXPECT_FALSE(event->propagation_stopped());
+ EXPECT_FALSE(event->immediate_propagation_stopped());
+ EXPECT_EQ(NULL, event->detail());
+}
+
+TEST_F(CustomEventTest, ConstructorWithEventTypeAndCustomInitDict) {
+ std::string result;
+ bool success = EvaluateScript(
+ "var event = new CustomEvent('dog', "
+ " {'bubbles':true, "
+ " 'cancelable':true, "
+ " 'detail':{'cobalt':'rulez'}});"
+ "if (event.type == 'dog' &&"
+ " event.bubbles == true &&"
+ " event.cancelable == true) "
+ " event.detail.cobalt;",
+ &result);
+ EXPECT_EQ("rulez", result);
+
+ if (!success) {
+ DLOG(ERROR) << "Failed to evaluate test: "
+ << "\"" << result << "\"";
+ } else {
+ LOG(INFO) << "Test result : "
+ << "\"" << result << "\"";
+ }
+}
+
+TEST_F(CustomEventTest, InitCustomEvent) {
+ std::string result;
+ bool success = EvaluateScript(
+ "var event = new CustomEvent('cat');\n"
+ "event.initCustomEvent('dog', true, true, {cobalt:'rulez'});"
+ "if (event.type == 'dog' &&"
+ " event.detail &&"
+ " event.bubbles == true &&"
+ " event.cancelable == true) "
+ " event.detail.cobalt;",
+ &result);
+ EXPECT_EQ("rulez", result);
+
+ if (!success) {
+ DLOG(ERROR) << "Failed to evaluate test: "
+ << "\"" << result << "\"";
+ } else {
+ LOG(INFO) << "Test result : "
+ << "\"" << result << "\"";
+ }
+}
+
+} // namespace dom
+} // namespace cobalt
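The test above exercises CustomEvent from JavaScript; below is a minimal C++ sketch of the same surface, assuming a |target| EventTarget is already wired up (the constructor, detail(), and DispatchEvent() used here all appear elsewhere in this patch):

    // |target| is assumed to be a live scoped_refptr<dom::EventTarget>.
    scoped_refptr<dom::CustomEvent> event = new dom::CustomEvent("mytestevent");
    DCHECK(!event->detail());  // No detail was supplied, so detail() is NULL.
    target->DispatchEvent(event);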
diff --git a/src/cobalt/dom/document.cc b/src/cobalt/dom/document.cc
index ff262c6..ec42f43 100644
--- a/src/cobalt/dom/document.cc
+++ b/src/cobalt/dom/document.cc
@@ -34,6 +34,7 @@
#include "cobalt/dom/comment.h"
#include "cobalt/dom/csp_delegate.h"
#include "cobalt/dom/csp_delegate_factory.h"
+#include "cobalt/dom/custom_event.h"
#include "cobalt/dom/dom_exception.h"
#include "cobalt/dom/dom_implementation.h"
#include "cobalt/dom/element.h"
@@ -240,6 +241,8 @@
} else if (base::strcasecmp(interface_name.c_str(), "uievent") == 0 ||
base::strcasecmp(interface_name.c_str(), "uievents") == 0) {
return new UIEvent(Event::Uninitialized);
+ } else if (base::strcasecmp(interface_name.c_str(), "customevent") == 0) {
+ return new CustomEvent(Event::Uninitialized);
}
DOMException::Raise(
diff --git a/src/cobalt/dom/document_test.cc b/src/cobalt/dom/document_test.cc
index 2a4ee06..7e4c391 100644
--- a/src/cobalt/dom/document_test.cc
+++ b/src/cobalt/dom/document_test.cc
@@ -19,6 +19,7 @@
#include "cobalt/cssom/css_style_sheet.h"
#include "cobalt/dom/attr.h"
#include "cobalt/dom/comment.h"
+#include "cobalt/dom/custom_event.h"
#include "cobalt/dom/dom_exception.h"
#include "cobalt/dom/dom_implementation.h"
#include "cobalt/dom/dom_stat_tracker.h"
@@ -172,6 +173,19 @@
EXPECT_FALSE(event->initialized_flag());
}
+TEST_F(DocumentTest, CreateEventCustomEvent) {
+ StrictMock<MockExceptionState> exception_state;
+ scoped_refptr<script::ScriptException> exception;
+ scoped_refptr<Document> document = new Document(&html_element_context_);
+
+  // Create an Event; the name is case-insensitive.
+ scoped_refptr<Event> event =
+ document->CreateEvent("CuStOmEvEnT", &exception_state);
+ EXPECT_TRUE(event);
+ EXPECT_FALSE(event->initialized_flag());
+ EXPECT_TRUE(base::polymorphic_downcast<CustomEvent*>(event.get()));
+}
+
TEST_F(DocumentTest, CreateEventUIEvent) {
StrictMock<MockExceptionState> exception_state;
scoped_refptr<script::ScriptException> exception;
diff --git a/src/cobalt/dom/dom.gyp b/src/cobalt/dom/dom.gyp
index 88ef23d..27a8f29 100644
--- a/src/cobalt/dom/dom.gyp
+++ b/src/cobalt/dom/dom.gyp
@@ -62,6 +62,7 @@
'css_animations_adapter.h',
'css_transitions_adapter.cc',
'css_transitions_adapter.h',
+ 'custom_event.h',
'data_view.cc',
'data_view.h',
'device_orientation_event.cc',
@@ -93,8 +94,10 @@
'dom_token_list.h',
'element.cc',
'element.h',
+ 'error_event.h',
'event.cc',
'event.h',
+ 'event_init.h',
'event_listener.cc',
'event_listener.h',
'event_queue.cc',
@@ -105,6 +108,7 @@
'float64_array.h',
'focus_event.cc',
'focus_event.h',
+ 'focus_event_init.h',
'font_cache.cc',
'font_cache.h',
'font_face.cc',
@@ -164,6 +168,7 @@
'initial_computed_style.h',
'keyboard_event.cc',
'keyboard_event.h',
+ 'keyboard_event_init.h',
'keycode.h',
'keyframes_map_updater.cc',
'keyframes_map_updater.h',
@@ -182,6 +187,7 @@
'mime_type_array.h',
'mouse_event.cc',
'mouse_event.h',
+ 'mouse_event_init.h',
'mutation_observer.cc',
'mutation_observer.h',
'mutation_observer_init.h',
@@ -212,6 +218,7 @@
'plugin_array.h',
'pointer_event.cc',
'pointer_event.h',
+ 'pointer_event_init.h',
'pointer_state.cc',
'pointer_state.h',
'progress_event.cc',
@@ -248,6 +255,7 @@
'typed_array.h',
'ui_event.cc',
'ui_event.h',
+ 'ui_event_init.h',
'ui_event_with_key_state.cc',
'ui_event_with_key_state.h',
'uint8_array.h',
@@ -260,6 +268,7 @@
'video_track_list.h',
'wheel_event.cc',
'wheel_event.h',
+ 'wheel_event_init.h',
'window.cc',
'window.h',
'window_timers.cc',
diff --git a/src/cobalt/dom/dom_test.gyp b/src/cobalt/dom/dom_test.gyp
index 2c41b83..8cf266a 100644
--- a/src/cobalt/dom/dom_test.gyp
+++ b/src/cobalt/dom/dom_test.gyp
@@ -28,6 +28,7 @@
'comment_test.cc',
'crypto_test.cc',
'csp_delegate_test.cc',
+ 'custom_event_test.cc',
'data_view_test.cc',
'document_test.cc',
'document_type_test.cc',
@@ -37,6 +38,7 @@
'dom_string_map_test.cc',
'dom_token_list_test.cc',
'element_test.cc',
+ 'error_event_test.cc',
'event_queue_test.cc',
'event_target_test.cc',
'event_test.cc',
diff --git a/src/cobalt/dom/error_event.h b/src/cobalt/dom/error_event.h
new file mode 100644
index 0000000..d3b3b71
--- /dev/null
+++ b/src/cobalt/dom/error_event.h
@@ -0,0 +1,90 @@
+// Copyright 2017 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef COBALT_DOM_ERROR_EVENT_H_
+#define COBALT_DOM_ERROR_EVENT_H_
+
+#include <string>
+
+#include "base/memory/scoped_ptr.h"
+#include "cobalt/dom/error_event_init.h"
+#include "cobalt/dom/event.h"
+#include "cobalt/script/value_handle.h"
+
+namespace cobalt {
+namespace dom {
+
+// Whenever an uncaught runtime script error occurs in one of the scripts
+// associated with a Document, the user agent must report the error for the
+// relevant script.
+// https://www.w3.org/TR/html5/webappapis.html#errorevent
+class ErrorEvent : public Event {
+ public:
+ explicit ErrorEvent(const std::string& type)
+ : Event(type), lineno_(0), colno_(0) {}
+ ErrorEvent(const std::string& type, const ErrorEventInit& init_dict)
+ : Event(type, init_dict),
+ message_(init_dict.message()),
+ filename_(init_dict.filename()),
+ lineno_(init_dict.lineno()),
+ colno_(init_dict.colno()) {
+ InitError(init_dict);
+ }
+ ErrorEvent(base::Token type, const ErrorEventInit& init_dict)
+ : Event(type, init_dict),
+ message_(init_dict.message()),
+ filename_(init_dict.filename()),
+ lineno_(init_dict.lineno()),
+ colno_(init_dict.colno()) {
+ InitError(init_dict);
+ }
+
+ // Web API: ErrorEvent
+ //
+ std::string message() const { return message_; }
+ std::string filename() const { return filename_; }
+ uint32 lineno() const { return lineno_; }
+ uint32 colno() const { return colno_; }
+
+ const script::ValueHandleHolder* error() const {
+ if (!error_) {
+ return NULL;
+ }
+ return &(error_->referenced_value());
+ }
+
+ DEFINE_WRAPPABLE_TYPE(ErrorEvent);
+
+ protected:
+ ~ErrorEvent() OVERRIDE {}
+
+ private:
+ void InitError(const ErrorEventInit& init_dict) {
+ const script::ValueHandleHolder* error = init_dict.error();
+ if (error) {
+ error_.reset(new script::ValueHandleHolder::Reference(this, *error));
+ }
+ }
+
+ std::string message_;
+ std::string filename_;
+ uint32 lineno_;
+ uint32 colno_;
+ scoped_ptr<script::ValueHandleHolder::Reference> error_;
+};
+
+} // namespace dom
+} // namespace cobalt
+
+#endif // COBALT_DOM_ERROR_EVENT_H_
diff --git a/src/cobalt/dom/error_event.idl b/src/cobalt/dom/error_event.idl
new file mode 100644
index 0000000..10c4699
--- /dev/null
+++ b/src/cobalt/dom/error_event.idl
@@ -0,0 +1,24 @@
+// Copyright 2017 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// https://www.w3.org/TR/html5/webappapis.html#errorevent
+
+[Constructor(DOMString type, optional ErrorEventInit eventInitDict)]
+interface ErrorEvent : Event {
+ readonly attribute DOMString message;
+ readonly attribute DOMString filename;
+ readonly attribute unsigned long lineno;
+ readonly attribute unsigned long colno;
+ readonly attribute any error;
+};
diff --git a/src/cobalt/dom/error_event_init.idl b/src/cobalt/dom/error_event_init.idl
new file mode 100644
index 0000000..77e80c0
--- /dev/null
+++ b/src/cobalt/dom/error_event_init.idl
@@ -0,0 +1,23 @@
+// Copyright 2017 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// https://www.w3.org/TR/html5/webappapis.html#erroreventinit
+
+dictionary ErrorEventInit : EventInit {
+ DOMString message = "";
+ DOMString filename = "";
+ unsigned long lineno = 0;
+ unsigned long colno = 0;
+ any error = null;
+};
diff --git a/src/cobalt/dom/error_event_test.cc b/src/cobalt/dom/error_event_test.cc
new file mode 100644
index 0000000..b8ead45
--- /dev/null
+++ b/src/cobalt/dom/error_event_test.cc
@@ -0,0 +1,191 @@
+// Copyright 2017 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "cobalt/dom/error_event.h"
+
+#include <string>
+
+#include "base/bind.h"
+#include "base/callback.h"
+#include "base/memory/scoped_ptr.h"
+#include "cobalt/css_parser/parser.h"
+#include "cobalt/dom/error_event_init.h"
+#include "cobalt/dom/local_storage_database.h"
+#include "cobalt/dom/testing/gtest_workarounds.h"
+#include "cobalt/dom/window.h"
+#include "cobalt/dom_parser/parser.h"
+#include "cobalt/loader/fetcher_factory.h"
+#include "cobalt/media/media_module_stub.h"
+#include "cobalt/media_session/media_session.h"
+#include "cobalt/network/network_module.h"
+#include "cobalt/script/global_environment.h"
+#include "cobalt/script/javascript_engine.h"
+#include "cobalt/script/source_code.h"
+#include "cobalt/script/testing/fake_script_value.h"
+#include "cobalt/script/value_handle.h"
+#include "cobalt/script/wrappable.h"
+#include "nb/pointer_arithmetic.h"
+#include "testing/gmock/include/gmock/gmock.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace cobalt {
+namespace dom {
+
+using ::cobalt::script::testing::FakeScriptValue;
+
+class MockErrorCallback : public base::Callback<void(const std::string&)> {
+ public:
+ MOCK_METHOD1(Run, void(const std::string&));
+};
+
+namespace {
+class ErrorEventTest : public ::testing::Test {
+ public:
+ ErrorEventTest()
+ : environment_settings_(new script::EnvironmentSettings),
+ message_loop_(MessageLoop::TYPE_DEFAULT),
+ css_parser_(css_parser::Parser::Create()),
+ dom_parser_(new dom_parser::Parser(mock_error_callback_)),
+ fetcher_factory_(new loader::FetcherFactory(&network_module_)),
+ local_storage_database_(NULL),
+ stub_media_module_(new media::MediaModuleStub()),
+ url_("about:blank"),
+ window_(new Window(
+ 1920, 1080, 1.f, base::kApplicationStateStarted, css_parser_.get(),
+ dom_parser_.get(), fetcher_factory_.get(), NULL, NULL, NULL, NULL,
+ NULL, NULL, &local_storage_database_, stub_media_module_.get(),
+ stub_media_module_.get(), NULL, NULL, NULL, NULL, NULL, url_, "",
+ "en-US", base::Callback<void(const GURL&)>(),
+ base::Bind(&MockErrorCallback::Run,
+ base::Unretained(&mock_error_callback_)),
+ NULL, network_bridge::PostSender(),
+ std::string() /* default security policy */, kCspEnforcementEnable,
+ base::Closure() /* csp_policy_changed */,
+ base::Closure() /* ran_animation_frame_callbacks */,
+ base::Closure() /* window_close */,
+ base::Closure() /* window_minimize */, NULL, NULL)) {
+ engine_ = script::JavaScriptEngine::CreateEngine(
+ script::JavaScriptEngine::Options());
+ global_environment_ = engine_->CreateGlobalEnvironment();
+ global_environment_->CreateGlobalObject(window_,
+ environment_settings_.get());
+ }
+
+ bool EvaluateScript(const std::string& js_code, std::string* result);
+
+ private:
+ scoped_ptr<script::JavaScriptEngine> engine_;
+ scoped_refptr<script::GlobalEnvironment> global_environment_;
+
+ const scoped_ptr<script::EnvironmentSettings> environment_settings_;
+ MessageLoop message_loop_;
+ MockErrorCallback mock_error_callback_;
+ scoped_ptr<css_parser::Parser> css_parser_;
+ scoped_ptr<dom_parser::Parser> dom_parser_;
+ network::NetworkModule network_module_;
+ scoped_ptr<loader::FetcherFactory> fetcher_factory_;
+ dom::LocalStorageDatabase local_storage_database_;
+ scoped_ptr<media::MediaModule> stub_media_module_;
+ GURL url_;
+ const scoped_refptr<Window> window_;
+};
+
+bool ErrorEventTest::EvaluateScript(const std::string& js_code,
+ std::string* result) {
+ DCHECK(global_environment_);
+ DCHECK(result);
+ scoped_refptr<script::SourceCode> source_code =
+ script::SourceCode::CreateSourceCode(
+ js_code, base::SourceLocation(__FILE__, __LINE__, 1));
+
+ global_environment_->EnableEval();
+ global_environment_->SetReportEvalCallback(base::Closure());
+ bool succeeded = global_environment_->EvaluateScript(source_code, result);
+ return succeeded;
+}
+} // namespace
+
+TEST_F(ErrorEventTest, ConstructorWithEventTypeString) {
+ scoped_refptr<ErrorEvent> event = new ErrorEvent("mytestevent");
+
+ EXPECT_EQ("mytestevent", event->type());
+ EXPECT_EQ(NULL, event->target());
+ EXPECT_EQ(NULL, event->current_target());
+ EXPECT_EQ(Event::kNone, event->event_phase());
+ EXPECT_FALSE(event->bubbles());
+ EXPECT_FALSE(event->cancelable());
+ EXPECT_FALSE(event->default_prevented());
+ EXPECT_FALSE(event->IsBeingDispatched());
+ EXPECT_FALSE(event->propagation_stopped());
+ EXPECT_FALSE(event->immediate_propagation_stopped());
+ EXPECT_EQ("", event->message());
+ EXPECT_EQ("", event->filename());
+ EXPECT_EQ(0, event->lineno());
+ EXPECT_EQ(0, event->colno());
+ EXPECT_EQ(NULL, event->error());
+}
+
+TEST_F(ErrorEventTest, ConstructorWithEventTypeAndDefaultInitDict) {
+ ErrorEventInit init;
+ scoped_refptr<ErrorEvent> event = new ErrorEvent("mytestevent", init);
+
+ EXPECT_EQ("mytestevent", event->type());
+ EXPECT_EQ(NULL, event->target());
+ EXPECT_EQ(NULL, event->current_target());
+ EXPECT_EQ(Event::kNone, event->event_phase());
+ EXPECT_FALSE(event->bubbles());
+ EXPECT_FALSE(event->cancelable());
+ EXPECT_FALSE(event->default_prevented());
+ EXPECT_FALSE(event->IsBeingDispatched());
+ EXPECT_FALSE(event->propagation_stopped());
+ EXPECT_FALSE(event->immediate_propagation_stopped());
+ EXPECT_EQ("", event->message());
+ EXPECT_EQ("", event->filename());
+ EXPECT_EQ(0, event->lineno());
+ EXPECT_EQ(0, event->colno());
+ EXPECT_EQ(NULL, event->error());
+}
+
+TEST_F(ErrorEventTest, ConstructorWithEventTypeAndErrorInitDict) {
+ std::string result;
+ bool success = EvaluateScript(
+ "var event = new ErrorEvent('dog', "
+ " {'cancelable':true, "
+ " 'message':'error_message', "
+ " 'filename':'error_filename', "
+ " 'lineno':100, "
+ " 'colno':50, "
+ " 'error':{'cobalt':'rulez'}});"
+ "if (event.type == 'dog' &&"
+ " event.bubbles == false &&"
+ " event.cancelable == true &&"
+ " event.message == 'error_message' &&"
+ " event.filename == 'error_filename' &&"
+ " event.lineno == 100 &&"
+ " event.colno == 50) "
+ " event.error.cobalt;",
+ &result);
+ EXPECT_EQ("rulez", result);
+
+ if (!success) {
+ DLOG(ERROR) << "Failed to evaluate test: "
+ << "\"" << result << "\"";
+ } else {
+ LOG(INFO) << "Test result : "
+ << "\"" << result << "\"";
+ }
+}
+
+} // namespace dom
+} // namespace cobalt
diff --git a/src/cobalt/dom/event_target.cc b/src/cobalt/dom/event_target.cc
index ca6593c..b6a3a8c 100644
--- a/src/cobalt/dom/event_target.cc
+++ b/src/cobalt/dom/event_target.cc
@@ -78,7 +78,6 @@
DCHECK(event->initialized_flag());
TRACE_EVENT1("cobalt::dom", "EventTarget::DispatchEvent", "event",
event->type().c_str());
-
if (!event || event->IsBeingDispatched() || !event->initialized_flag()) {
return false;
}
@@ -223,6 +222,18 @@
new EventListenerInfo(type, this, listener, use_capture, listener_type));
}
+bool EventTarget::HasEventListener(base::Token type) {
+ TRACK_MEMORY_SCOPE("DOM");
+
+ for (EventListenerInfos::iterator iter = event_listener_infos_.begin();
+ iter != event_listener_infos_.end(); ++iter) {
+ if ((*iter)->type == type) {
+ return true;
+ }
+ }
+ return false;
+}
+
EventTarget::EventListenerInfo::EventListenerInfo(
base::Token type, EventTarget* const event_target,
const EventListenerScriptValue& listener, bool use_capture,
diff --git a/src/cobalt/dom/event_target.h b/src/cobalt/dom/event_target.h
index 896667c..4e269f3 100644
--- a/src/cobalt/dom/event_target.h
+++ b/src/cobalt/dom/event_target.h
@@ -78,6 +78,9 @@
const tracked_objects::Location& location, base::Token event_name,
const base::Closure& dispatched_callback);
+  // Check if the target has an event listener for |type|, attribute or not.
+ bool HasEventListener(base::Token type);
+
// Web API: GlobalEventHandlers (implements)
// Many objects can have event handlers specified. These act as non-capture
// event listeners for the object on which they are specified.
@@ -338,6 +341,20 @@
SetAttributeEventListener(base::Tokens::timeupdate(), event_listener);
}
+ const EventListenerScriptValue* onbeforeunload() {
+ return GetAttributeEventListener(base::Tokens::beforeunload());
+ }
+ void set_onbeforeunload(const EventListenerScriptValue& event_listener) {
+ SetAttributeEventListener(base::Tokens::beforeunload(), event_listener);
+ }
+
+ const EventListenerScriptValue* ontransitionend() {
+ return GetAttributeEventListener(base::Tokens::transitionend());
+ }
+ void set_ontransitionend(const EventListenerScriptValue& event_listener) {
+ SetAttributeEventListener(base::Tokens::transitionend(), event_listener);
+ }
+
const EventListenerScriptValue* onunload() {
return GetAttributeEventListener(base::Tokens::unload());
}
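A short sketch of how the new HasEventListener() query might be used together with the beforeunload token, assuming a |window| event target and that Event has a base::Token constructor (both outside this hunk); the patch itself only adds the query, not this call site:

    // Skip building and dispatching beforeunload when nothing is listening,
    // whether via the onbeforeunload attribute or addEventListener().
    if (window->HasEventListener(base::Tokens::beforeunload())) {
      scoped_refptr<dom::Event> event(
          new dom::Event(base::Tokens::beforeunload()));
      window->DispatchEvent(event);
    }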
diff --git a/src/cobalt/dom/global_event_handlers.idl b/src/cobalt/dom/global_event_handlers.idl
index 372f2f7..df852fc 100644
--- a/src/cobalt/dom/global_event_handlers.idl
+++ b/src/cobalt/dom/global_event_handlers.idl
@@ -44,6 +44,8 @@
attribute EventHandler onresize;
+ attribute EventHandler ontransitionend;
+
// Extensions for the Pointer Events recommendation.
// https://www.w3.org/TR/2015/REC-pointerevents-20150224/#extensions-to-the-globaleventhandlers-interface
attribute EventHandler ongotpointercapture;
diff --git a/src/cobalt/dom/html_element.h b/src/cobalt/dom/html_element.h
index e62c335..5806cfc 100644
--- a/src/cobalt/dom/html_element.h
+++ b/src/cobalt/dom/html_element.h
@@ -272,6 +272,10 @@
// https://www.w3.org/TR/SVG11/interact.html#PointerEventsProperty
bool CanbeDesignatedByPointerIfDisplayed() const;
+ // Returns true if this node and all of its ancestors do NOT have display set
+ // to 'none'.
+ bool IsDisplayed() const;
+
DEFINE_WRAPPABLE_TYPE(HTMLElement);
protected:
@@ -330,10 +334,6 @@
// Purge the cached background images on only this node.
void PurgeCachedBackgroundImages();
- // Returns true if this node and all of its ancestors do NOT have display set
- // to 'none'.
- bool IsDisplayed() const;
-
bool locked_for_focus_;
// The directionality of the html element is determined by the 'dir'
diff --git a/src/cobalt/dom/html_media_element.cc b/src/cobalt/dom/html_media_element.cc
index 0a4a3a7..51200ba 100644
--- a/src/cobalt/dom/html_media_element.cc
+++ b/src/cobalt/dom/html_media_element.cc
@@ -207,6 +207,12 @@
std::string HTMLMediaElement::CanPlayType(const std::string& mime_type,
const std::string& key_system) {
+ if (!html_element_context()->can_play_type_handler()) {
+ DLOG(ERROR) << __FUNCTION__ << "(" << mime_type << ", " << key_system
+ << "): Media playback in PRELOADING is not supported.";
+ return "";
+ }
+
#if defined(COBALT_MEDIA_SOURCE_2016)
DLOG_IF(ERROR, !key_system.empty())
<< "CanPlayType() only accepts one parameter but (" << key_system
@@ -732,6 +738,11 @@
}
}
+ if (!html_element_context()->web_media_player_factory()) {
+ DLOG(ERROR) << "Media playback in PRELOADING is not supported.";
+ return;
+ }
+
player_ =
html_element_context()->web_media_player_factory()->CreateWebMediaPlayer(
this);
@@ -1517,7 +1528,7 @@
EndProcessingMediaPlayerCallback();
}
-void HTMLMediaElement::TimeChanged() {
+void HTMLMediaElement::TimeChanged(bool eos_played) {
DCHECK(player_);
if (!player_) {
return;
@@ -1543,8 +1554,9 @@
// When the current playback position reaches the end of the media resource
// when the direction of playback is forwards, then the user agent must follow
// these steps:
- if (!SbDoubleIsNan(dur) && (0.0f != dur) && now >= dur &&
- playback_rate_ > 0) {
+ eos_played |=
+ !SbDoubleIsNan(dur) && (0.0f != dur) && now >= dur && playback_rate_ > 0;
+ if (eos_played) {
// If the media element has a loop attribute specified and does not have a
// current media controller,
if (loop()) {
diff --git a/src/cobalt/dom/html_media_element.h b/src/cobalt/dom/html_media_element.h
index b3d1f0d..31314e3 100644
--- a/src/cobalt/dom/html_media_element.h
+++ b/src/cobalt/dom/html_media_element.h
@@ -231,7 +231,7 @@
// WebMediaPlayerClient methods
void NetworkStateChanged() OVERRIDE;
void ReadyStateChanged() OVERRIDE;
- void TimeChanged() OVERRIDE;
+ void TimeChanged(bool eos_played) OVERRIDE;
void DurationChanged() OVERRIDE;
void OutputModeChanged() OVERRIDE;
void PlaybackStateChanged() OVERRIDE;
diff --git a/src/cobalt/dom/window.cc b/src/cobalt/dom/window.cc
index 0d4e62e..fab5c8c 100644
--- a/src/cobalt/dom/window.cc
+++ b/src/cobalt/dom/window.cc
@@ -28,6 +28,8 @@
#include "cobalt/dom/document.h"
#include "cobalt/dom/dom_settings.h"
#include "cobalt/dom/element.h"
+#include "cobalt/dom/error_event.h"
+#include "cobalt/dom/error_event_init.h"
#include "cobalt/dom/event.h"
#include "cobalt/dom/history.h"
#include "cobalt/dom/html_element.h"
@@ -111,6 +113,7 @@
height_(height),
device_pixel_ratio_(device_pixel_ratio),
is_resize_event_pending_(false),
+ is_reporting_script_error_(false),
#if defined(ENABLE_TEST_RUNNER)
test_runner_(new TestRunner()),
#endif // ENABLE_TEST_RUNNER
@@ -430,6 +433,65 @@
html_element_context_->page_visibility_state()->SetApplicationState(state);
}
+bool Window::ReportScriptError(const script::ErrorReport& error_report) {
+ // Runtime script errors: when the user agent is required to report an error
+ // for a particular script, it must run these steps, after which the error is
+ // either handled or not handled:
+ // https://www.w3.org/TR/html5/webappapis.html#runtime-script-errors
+
+ // 1. If target is in error reporting mode, then abort these steps; the error
+ // is not handled.
+ if (is_reporting_script_error_) {
+ return false;
+ }
+
+ // 2. Let target be in error reporting mode.
+ is_reporting_script_error_ = true;
+
+ // 7. Let event be a new trusted ErrorEvent object that does not bubble but is
+ // cancelable, and which has the event name error.
+ // NOTE: Cobalt does not currently support trusted events.
+ ErrorEventInit error_event_init;
+ error_event_init.set_bubbles(false);
+ error_event_init.set_cancelable(true);
+
+ if (error_report.is_muted) {
+ // 6. If script has muted errors, then set message to "Script error.", set
+ // location to the empty string, set line and col to 0, and set error
+ // object to null.
+ error_event_init.set_message("Script error.");
+ error_event_init.set_filename("");
+ error_event_init.set_lineno(0);
+ error_event_init.set_colno(0);
+ error_event_init.set_error(NULL);
+ } else {
+ // 8. Initialize event's message attribute to message.
+ error_event_init.set_message(error_report.message);
+ // 9. Initialize event's filename attribute to location.
+ error_event_init.set_filename(error_report.filename);
+ // 10. Initialize event's lineno attribute to line.
+ error_event_init.set_lineno(error_report.line_number);
+ // 11. Initialize event's colno attribute to col.
+ error_event_init.set_colno(error_report.column_number);
+ // 12. Initialize event's error attribute to error object.
+ error_event_init.set_error(error_report.error ? error_report.error.get()
+ : NULL);
+ }
+
+ scoped_refptr<ErrorEvent> error_event(
+ new ErrorEvent(base::Tokens::error(), error_event_init));
+
+ // 13. Dispatch event at target.
+ DispatchEvent(error_event);
+
+ // 14. Let target no longer be in error reporting mode.
+ is_reporting_script_error_ = false;
+
+ // 15. If event was canceled, then the error is handled. Otherwise, the error
+ // is not handled.
+ return error_event->default_prevented();
+}
+
void Window::SetSynchronousLayoutCallback(
const base::Closure& synchronous_layout_callback) {
document_->set_synchronous_layout_callback(synchronous_layout_callback);
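A hedged sketch of feeding an uncaught exception into the new Window::ReportScriptError() path; the field names mirror the reads above, but treating script::ErrorReport as a default-constructible struct with assignable members, and the |window| pointer itself, are assumptions:

    script::ErrorReport error_report;
    error_report.message = "Uncaught TypeError: foo is not a function";
    error_report.filename = "https://example.com/app.js";
    error_report.line_number = 12;
    error_report.column_number = 3;
    // Step 15 above: the error counts as handled only if a listener canceled
    // the dispatched "error" event.
    bool handled = window->ReportScriptError(error_report);
    DLOG_IF(WARNING, !handled) << "Unhandled script error: "
                               << error_report.message;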
diff --git a/src/cobalt/dom/window.h b/src/cobalt/dom/window.h
index e3a3f83..9cd4cb6 100644
--- a/src/cobalt/dom/window.h
+++ b/src/cobalt/dom/window.h
@@ -53,6 +53,7 @@
#include "cobalt/page_visibility/page_visibility_state.h"
#include "cobalt/script/callback_function.h"
#include "cobalt/script/environment_settings.h"
+#include "cobalt/script/error_report.h"
#include "cobalt/script/execution_state.h"
#include "cobalt/script/script_runner.h"
#include "cobalt/script/script_value_factory.h"
@@ -310,6 +311,11 @@
// precipitate events to be dispatched.
void SetApplicationState(base::ApplicationState state);
+ // Performs the steps specified for runtime script errors:
+ // https://www.w3.org/TR/html5/webappapis.html#runtime-script-errors
+  // Returns whether or not the error was handled.
+ bool ReportScriptError(const script::ErrorReport& error_report);
+
// page_visibility::PageVisibilityState::Observer implementation.
void OnWindowFocusChanged(bool has_focus) OVERRIDE;
void OnVisibilityStateChanged(
@@ -347,6 +353,11 @@
// visibility state changes to visible.
bool is_resize_event_pending_;
+ // Whether or not the window is currently reporting a script error. This is
+ // used to prevent infinite recursion, because reporting the error causes an
+ // event to be dispatched, which can generate a new script error.
+ bool is_reporting_script_error_;
+
#if defined(ENABLE_TEST_RUNNER)
scoped_refptr<TestRunner> test_runner_;
#endif // ENABLE_TEST_RUNNER
diff --git a/src/cobalt/dom/window_event_handlers.idl b/src/cobalt/dom/window_event_handlers.idl
index 2cda344..bc2bb42 100644
--- a/src/cobalt/dom/window_event_handlers.idl
+++ b/src/cobalt/dom/window_event_handlers.idl
@@ -17,4 +17,5 @@
[NoInterfaceObject]
interface WindowEventHandlers {
attribute EventHandler onunload;
+ attribute EventHandler onbeforeunload;
};
diff --git a/src/cobalt/h5vcc/h5vcc_accessibility.cc b/src/cobalt/h5vcc/h5vcc_accessibility.cc
index 2f9ddb7..7ab5164 100644
--- a/src/cobalt/h5vcc/h5vcc_accessibility.cc
+++ b/src/cobalt/h5vcc/h5vcc_accessibility.cc
@@ -42,7 +42,6 @@
#if SB_HAS(SPEECH_SYNTHESIS)
bool IsTextToSpeechEnabled() {
-#if SB_API_VERSION >= 4
// Check if the tts feature is enabled in Starboard.
SbAccessibilityTextToSpeechSettings tts_settings = {0};
// Check platform settings.
@@ -50,7 +49,7 @@
return tts_settings.has_text_to_speech_setting &&
tts_settings.is_text_to_speech_enabled;
}
-#endif // SB_API_VERSION >= 4
+
return false;
}
#endif // SB_HAS(SPEECH_SYNTHESIS)
@@ -102,7 +101,6 @@
}
bool H5vccAccessibility::high_contrast_text() const {
-#if SB_API_VERSION >= 4
SbAccessibilityDisplaySettings settings;
SbMemorySet(&settings, 0, sizeof(settings));
@@ -111,13 +109,9 @@
}
return settings.is_high_contrast_text_enabled;
-#else // SB_API_VERSION >= 4
- return false;
-#endif // SB_API_VERSION >= 4
}
bool H5vccAccessibility::text_to_speech() const {
-#if SB_API_VERSION >= 4
SbAccessibilityTextToSpeechSettings settings;
SbMemorySet(&settings, 0, sizeof(settings));
@@ -127,9 +121,6 @@
return settings.has_text_to_speech_setting &&
settings.is_text_to_speech_enabled;
-#else // SB_API_VERSION >= 4
- return false;
-#endif // SB_API_VERSION >= 4
}
void H5vccAccessibility::AddHighContrastTextListener(
diff --git a/src/cobalt/layout/box.cc b/src/cobalt/layout/box.cc
index b1e6b70..34a3853 100644
--- a/src/cobalt/layout/box.cc
+++ b/src/cobalt/layout/box.cc
@@ -650,10 +650,10 @@
RenderAndAnimateOverflow(padding_rounded_corners, border_node,
&animate_node_builder, border_box_offset);
}
- border_node = RenderAndAnimateOpacity(border_node, &animate_node_builder,
- opacity, opacity_animated);
border_node = RenderAndAnimateTransform(border_node, &animate_node_builder,
border_box_offset);
+ border_node = RenderAndAnimateOpacity(border_node, &animate_node_builder,
+ opacity, opacity_animated);
cached_render_tree_node_info_->node_ =
animate_node_builder.empty()
diff --git a/src/cobalt/layout/box_generator.cc b/src/cobalt/layout/box_generator.cc
index 5f9c536..dc00344 100644
--- a/src/cobalt/layout/box_generator.cc
+++ b/src/cobalt/layout/box_generator.cc
@@ -64,7 +64,6 @@
scoped_refptr<render_tree::Image> GetVideoFrame(
const scoped_refptr<ShellVideoFrameProvider>& frame_provider,
render_tree::ResourceProvider* resource_provider) {
-#if SB_API_VERSION >= 4
SbDecodeTarget decode_target = frame_provider->GetCurrentSbDecodeTarget();
if (SbDecodeTargetIsValid(decode_target)) {
#if SB_HAS(GRAPHICS)
@@ -72,12 +71,8 @@
#else // SB_HAS(GRAPHICS)
UNREFERENCED_PARAMETER(resource_provider);
return NULL;
-#endif
+#endif // SB_HAS(GRAPHICS)
} else {
-#else // SB_API_VERSION >= 4
- UNREFERENCED_PARAMETER(resource_provider);
- {
-#endif
DCHECK(frame_provider);
scoped_refptr<VideoFrame> video_frame = frame_provider->GetCurrentFrame();
if (video_frame && video_frame->texture_id()) {
diff --git a/src/cobalt/layout/layout_manager.cc b/src/cobalt/layout/layout_manager.cc
index e9b8319..7f0cc65 100644
--- a/src/cobalt/layout/layout_manager.cc
+++ b/src/cobalt/layout/layout_manager.cc
@@ -24,7 +24,9 @@
#include "base/timer.h"
#include "cobalt/cssom/cascade_precedence.h"
#include "cobalt/dom/camera_3d.h"
+#include "cobalt/dom/html_body_element.h"
#include "cobalt/dom/html_element_context.h"
+#include "cobalt/dom/html_head_element.h"
#include "cobalt/dom/html_html_element.h"
#include "cobalt/layout/benchmark_stat_names.h"
#include "cobalt/layout/block_formatting_block_container_box.h"
@@ -77,6 +79,8 @@
const OnLayoutCallback on_layout_callback_;
const LayoutTrigger layout_trigger_;
+ bool produced_render_tree_;
+
// Setting these flags triggers an update of the layout box tree and the
// generation of a new render tree at a regular interval (e.g. 60Hz). Events
// such as DOM mutations cause them to be set to true. While the render tree
@@ -167,6 +171,7 @@
on_render_tree_produced_callback_(on_render_tree_produced),
on_layout_callback_(on_layout),
layout_trigger_(layout_trigger),
+ produced_render_tree_(false),
are_computed_styles_and_box_tree_dirty_(true),
is_render_tree_pending_(
StringPrintf("%s.Layout.IsRenderTreePending", name.c_str()), true,
@@ -366,11 +371,32 @@
are_computed_styles_and_box_tree_dirty_ = false;
}
+ // If no render tree has been produced yet, check if html, head, and
+ // body display should block the first render tree.
+ if (!produced_render_tree_) {
+ bool displayed_html = document->html()->IsDisplayed();
+ if (!displayed_html) {
+ return;
+ }
+    bool displayed_head = true;
+    if (document->head()) {
+      displayed_head = document->head()->IsDisplayed();
+    }
+    bool displayed_body = true;
+    if (document->body()) {
+      displayed_body = document->body()->IsDisplayed();
+    }
+ if (!displayed_head && !displayed_body) {
+ return;
+ }
+ }
+
scoped_refptr<render_tree::Node> render_tree_root =
layout::GenerateRenderTreeFromBoxTree(used_style_provider_.get(),
layout_stat_tracker_,
&initial_containing_block_);
bool run_on_render_tree_produced_callback = true;
+ produced_render_tree_ = true;
#if defined(ENABLE_TEST_RUNNER)
if (layout_trigger_ == kTestRunnerMode &&
window_->test_runner()->should_wait()) {
diff --git a/src/cobalt/layout_tests/testdata/cobalt/display_none_set_to_all_elements-expected.png b/src/cobalt/layout_tests/testdata/cobalt/display_none_set_to_all_elements-expected.png
deleted file mode 100644
index b16b41a..0000000
--- a/src/cobalt/layout_tests/testdata/cobalt/display_none_set_to_all_elements-expected.png
+++ /dev/null
Binary files differ
diff --git a/src/cobalt/layout_tests/testdata/cobalt/display_none_set_to_all_elements.html b/src/cobalt/layout_tests/testdata/cobalt/display_none_set_to_all_elements.html
deleted file mode 100644
index 4d9cc35..0000000
--- a/src/cobalt/layout_tests/testdata/cobalt/display_none_set_to_all_elements.html
+++ /dev/null
@@ -1,13 +0,0 @@
-<!--
- | Applying display: none to all elements.
- -->
-<html>
-<head></head>
-<body></body>
-<style>
-* {
- display: none;
-}
-</style>
-
-</html>
diff --git a/src/cobalt/layout_tests/testdata/cobalt/layout_tests.txt b/src/cobalt/layout_tests/testdata/cobalt/layout_tests.txt
index 03292a0..9509933 100644
--- a/src/cobalt/layout_tests/testdata/cobalt/layout_tests.txt
+++ b/src/cobalt/layout_tests/testdata/cobalt/layout_tests.txt
@@ -4,7 +4,6 @@
changing-css-text-triggers-layout
cobalt-oxide, file:///cobalt/browser/testdata/cobalt-oxide/cobalt-oxide.html
console-trace-should-not-crash
-display_none_set_to_all_elements
divs-with-background-color-and-text
fixed-width-divs-with-background-color
font-weight
diff --git a/src/cobalt/layout_tests/testdata/web-platform-tests/XMLHttpRequest/web_platform_tests.txt b/src/cobalt/layout_tests/testdata/web-platform-tests/XMLHttpRequest/web_platform_tests.txt
index 13a4626..87dab78 100644
--- a/src/cobalt/layout_tests/testdata/web-platform-tests/XMLHttpRequest/web_platform_tests.txt
+++ b/src/cobalt/layout_tests/testdata/web-platform-tests/XMLHttpRequest/web_platform_tests.txt
@@ -170,7 +170,7 @@
send-redirect-infinite.htm,PASS
send-redirect-infinite-sync.htm,FAIL
send-redirect-no-location.htm,FAIL
-send-redirect-to-cors.htm,DISABLE
+send-redirect-to-cors.htm,PASS
send-redirect-to-non-cors.htm,FAIL
send-response-event-order.htm,FAIL
send-response-upload-event-loadend.htm,PASS
diff --git a/src/cobalt/loader/error_fetcher.cc b/src/cobalt/loader/error_fetcher.cc
new file mode 100644
index 0000000..caf00e2
--- /dev/null
+++ b/src/cobalt/loader/error_fetcher.cc
@@ -0,0 +1,37 @@
+// Copyright 2017 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "cobalt/loader/error_fetcher.h"
+
+#include "base/bind.h"
+#include "base/message_loop.h"
+
+namespace cobalt {
+namespace loader {
+
+ErrorFetcher::ErrorFetcher(Handler* handler, const std::string& error_message)
+ : Fetcher(handler),
+ error_message_(error_message),
+ ALLOW_THIS_IN_INITIALIZER_LIST(weak_ptr_factory_(this)) {
+ MessageLoop::current()->PostTask(
+ FROM_HERE,
+ base::Bind(&ErrorFetcher::Fetch, weak_ptr_factory_.GetWeakPtr()));
+}
+
+void ErrorFetcher::Fetch() {
+ handler()->OnError(this, error_message_);
+}
+
+} // namespace loader
+} // namespace cobalt
diff --git a/src/cobalt/loader/error_fetcher.h b/src/cobalt/loader/error_fetcher.h
new file mode 100644
index 0000000..8f7738e
--- /dev/null
+++ b/src/cobalt/loader/error_fetcher.h
@@ -0,0 +1,41 @@
+// Copyright 2017 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef COBALT_LOADER_ERROR_FETCHER_H_
+#define COBALT_LOADER_ERROR_FETCHER_H_
+
+#include <string>
+
+#include "base/memory/weak_ptr.h"
+#include "cobalt/loader/fetcher.h"
+
+namespace cobalt {
+namespace loader {
+
+// Always returns an error.
+class ErrorFetcher : public Fetcher {
+ public:
+  ErrorFetcher(Handler* handler, const std::string& error_message);
+
+ private:
+ void Fetch();
+
+ std::string error_message_;
+ base::WeakPtrFactory<ErrorFetcher> weak_ptr_factory_;
+};
+
+} // namespace loader
+} // namespace cobalt
+
+#endif // COBALT_LOADER_ERROR_FETCHER_H_
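Because the constructor posts Fetch() to the current MessageLoop, the handler's OnError() arrives asynchronously on a later task rather than re-entrantly from CreateSecureFetcher(). A minimal handler sketch, mirroring the method signatures that StubFetcherHandler overrides further down (the class itself is illustrative):

    class LoggingFetcherHandler : public loader::Fetcher::Handler {
     public:
      void OnReceived(loader::Fetcher*, const char*, size_t) OVERRIDE {}
      void OnDone(loader::Fetcher*) OVERRIDE {}
      void OnError(loader::Fetcher*, const std::string& message) OVERRIDE {
        DLOG(ERROR) << "Fetch failed: " << message;
      }
    };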
diff --git a/src/cobalt/loader/fetcher_factory.cc b/src/cobalt/loader/fetcher_factory.cc
index 1e70cf4..9776365 100644
--- a/src/cobalt/loader/fetcher_factory.cc
+++ b/src/cobalt/loader/fetcher_factory.cc
@@ -14,6 +14,7 @@
#include "cobalt/loader/fetcher_factory.h"
+#include <sstream>
#include <string>
#include "base/bind.h"
@@ -25,6 +26,7 @@
#include "cobalt/loader/blob_fetcher.h"
#include "cobalt/loader/cache_fetcher.h"
#include "cobalt/loader/embedded_fetcher.h"
+#include "cobalt/loader/error_fetcher.h"
#include "cobalt/loader/file_fetcher.h"
#include "cobalt/loader/net_fetcher.h"
#include "cobalt/network/network_module.h"
@@ -37,6 +39,7 @@
const char kAboutScheme[] = "about";
#endif
+#if defined(COBALT_ENABLE_FILE_SCHEME)
bool FileURLToFilePath(const GURL& url, FilePath* file_path) {
DCHECK(url.is_valid() && url.SchemeIsFile());
std::string path = url.path();
@@ -45,6 +48,7 @@
*file_path = FilePath(path);
return !file_path->empty();
}
+#endif
std::string ClipUrl(const GURL& url, size_t length) {
const std::string& spec = url.possibly_invalid_spec();
@@ -94,59 +98,67 @@
scoped_ptr<Fetcher> FetcherFactory::CreateSecureFetcher(
const GURL& url, const csp::SecurityCallback& url_security_callback,
Fetcher::Handler* handler) {
+ DLOG(INFO) << "Fetching: " << ClipUrl(url, 60);
+
if (!url.is_valid()) {
- LOG(ERROR) << "URL is invalid: " << url;
- return scoped_ptr<Fetcher>(NULL);
+ std::stringstream error_message;
+ error_message << "URL is invalid: " << url;
+ return scoped_ptr<Fetcher>(new ErrorFetcher(handler, error_message.str()));
}
- DLOG(INFO) << "Fetching: " << ClipUrl(url, 60);
- scoped_ptr<Fetcher> fetcher;
+ if ((url.SchemeIs("https") || url.SchemeIs("http") ||
+ url.SchemeIs("data")) &&
+ network_module_) {
+ NetFetcher::Options options;
+ return scoped_ptr<Fetcher>(new NetFetcher(url, url_security_callback,
+ handler, network_module_,
+ options));
+ }
+
+ if (url.SchemeIs("blob") && !blob_resolver_.is_null()) {
+ return scoped_ptr<Fetcher>(new BlobFetcher(url, handler, blob_resolver_));
+ }
+
if (url.SchemeIs(kEmbeddedScheme)) {
EmbeddedFetcher::Options options;
- fetcher.reset(
- new EmbeddedFetcher(url, url_security_callback, handler, options));
- } else if (url.SchemeIsFile()) {
+ return scoped_ptr<Fetcher>(new EmbeddedFetcher(url, url_security_callback,
+ handler, options));
+ }
+
+  // The h5vcc-cache: scheme requires read_cache_callback_, which is not
+  // available in the main WebModule.
+ if (url.SchemeIs(kCacheScheme) && !read_cache_callback_.is_null()) {
+ return scoped_ptr<Fetcher>(new CacheFetcher(url, url_security_callback,
+ handler,
+ read_cache_callback_));
+ }
+
+#if defined(COBALT_ENABLE_FILE_SCHEME)
+ if (url.SchemeIsFile()) {
FilePath file_path;
- if (FileURLToFilePath(url, &file_path)) {
- FileFetcher::Options options;
- options.message_loop_proxy = file_thread_.message_loop_proxy();
- options.extra_search_dir = extra_search_dir_;
- fetcher.reset(new FileFetcher(file_path, handler, options));
- } else {
- LOG(ERROR) << "File URL cannot be converted to file path: " << url;
- }
- }
-#if defined(ENABLE_ABOUT_SCHEME)
- else if (url.SchemeIs(kAboutScheme)) { // NOLINT(readability/braces)
- fetcher.reset(new AboutFetcher(handler));
- }
-#endif
- else if (url.SchemeIs("blob")) { // NOLINT(readability/braces)
- if (!blob_resolver_.is_null()) {
- fetcher.reset(new BlobFetcher(url, handler, blob_resolver_));
- } else {
- LOG(ERROR) << "Fetcher factory not provided the blob registry, "
- "could not fetch the URL: "
- << url;
- }
- } else if (url.SchemeIs(kCacheScheme)) {
- if (read_cache_callback_.is_null()) {
- LOG(ERROR) << "read_cache_callback_ must be provided to CacheFetcher for "
- "accessing h5vcc-cache:// . This is not available in the "
- "main WebModule.";
- DCHECK(!read_cache_callback_.is_null());
- return fetcher.Pass();
+ if (!FileURLToFilePath(url, &file_path)) {
+ std::stringstream error_message;
+ error_message << "File URL cannot be converted to file path: " << url;
+ return scoped_ptr<Fetcher>(new ErrorFetcher(handler,
+ error_message.str()));
}
- fetcher.reset(new CacheFetcher(url, url_security_callback, handler,
- read_cache_callback_));
- } else { // NOLINT(readability/braces)
- DCHECK(network_module_) << "Network module required.";
- NetFetcher::Options options;
- fetcher.reset(new NetFetcher(url, url_security_callback, handler,
- network_module_, options));
+ FileFetcher::Options options;
+ options.message_loop_proxy = file_thread_.message_loop_proxy();
+ options.extra_search_dir = extra_search_dir_;
+ return scoped_ptr<Fetcher>(new FileFetcher(file_path, handler, options));
}
- return fetcher.Pass();
+#endif
+
+#if defined(ENABLE_ABOUT_SCHEME)
+ if (url.SchemeIs(kAboutScheme)) {
+ return scoped_ptr<Fetcher>(new AboutFetcher(handler));
+ }
+#endif
+
+ std::stringstream error_message;
+ error_message << "Scheme " << url.scheme() << ": is not supported";
+ return scoped_ptr<Fetcher>(new ErrorFetcher(handler, error_message.str()));
}
} // namespace loader
diff --git a/src/cobalt/loader/fetcher_factory_test.cc b/src/cobalt/loader/fetcher_factory_test.cc
index 7e525e1..e39d0a4 100644
--- a/src/cobalt/loader/fetcher_factory_test.cc
+++ b/src/cobalt/loader/fetcher_factory_test.cc
@@ -14,6 +14,7 @@
#include <string>
+#include "base/optional.h"
#include "base/run_loop.h"
#include "cobalt/loader/fetcher_factory.h"
#include "cobalt/loader/file_fetcher.h"
@@ -26,32 +27,32 @@
class StubFetcherHandler : public Fetcher::Handler {
public:
explicit StubFetcherHandler(base::RunLoop* run_loop)
- : fetcher_(NULL), run_loop_(run_loop) {}
+ : run_loop_(run_loop), fetcher_(NULL) {}
// From Fetcher::Handler.
void OnReceived(Fetcher* fetcher, const char* data, size_t size) OVERRIDE {
UNREFERENCED_PARAMETER(data);
UNREFERENCED_PARAMETER(size);
- CheckFetcher(fetcher);
+ CheckSameFetcher(fetcher);
}
void OnDone(Fetcher* fetcher) OVERRIDE {
- CheckFetcher(fetcher);
- if (run_loop_) {
- MessageLoop::current()->PostTask(FROM_HERE, run_loop_->QuitClosure());
- }
+ CheckSameFetcher(fetcher);
+ MessageLoop::current()->PostTask(FROM_HERE, run_loop_->QuitClosure());
}
- void OnError(Fetcher* fetcher, const std::string& error) OVERRIDE {
- UNREFERENCED_PARAMETER(error);
- CheckFetcher(fetcher);
- if (run_loop_) {
- MessageLoop::current()->PostTask(FROM_HERE, run_loop_->QuitClosure());
- }
+ void OnError(Fetcher* fetcher, const std::string& error_message) OVERRIDE {
+ CheckSameFetcher(fetcher);
+ error_message_ = error_message;
+ MessageLoop::current()->PostTask(FROM_HERE, run_loop_->QuitClosure());
}
Fetcher* fetcher() const { return fetcher_; }
+ const base::optional<std::string>& error_message() const {
+ return error_message_;
+ }
+
private:
- void CheckFetcher(Fetcher* fetcher) {
+ void CheckSameFetcher(Fetcher* fetcher) {
EXPECT_TRUE(fetcher);
if (fetcher_ == NULL) {
fetcher_ = fetcher;
@@ -60,8 +61,9 @@
EXPECT_EQ(fetcher_, fetcher);
}
- Fetcher* fetcher_;
base::RunLoop* run_loop_;
+ Fetcher* fetcher_;
+ base::optional<std::string> error_message_;
};
} // namespace
@@ -79,27 +81,42 @@
};
TEST_F(FetcherFactoryTest, InvalidURL) {
- StubFetcherHandler stub_fetcher_handler(NULL);
+ base::RunLoop run_loop;
+ StubFetcherHandler stub_fetcher_handler(&run_loop);
+
fetcher_ = fetcher_factory_.CreateFetcher(GURL("invalid-url"),
&stub_fetcher_handler);
- EXPECT_FALSE(fetcher_.get());
- EXPECT_FALSE(stub_fetcher_handler.fetcher());
+ EXPECT_TRUE(fetcher_);
+
+ run_loop.Run();
+ EXPECT_EQ(fetcher_.get(), stub_fetcher_handler.fetcher());
+ EXPECT_TRUE(stub_fetcher_handler.error_message().has_engaged());
}
TEST_F(FetcherFactoryTest, EmptyFileURL) {
- StubFetcherHandler stub_fetcher_handler(NULL);
+ base::RunLoop run_loop;
+ StubFetcherHandler stub_fetcher_handler(&run_loop);
+
fetcher_ =
fetcher_factory_.CreateFetcher(GURL("file:///"), &stub_fetcher_handler);
- EXPECT_FALSE(fetcher_.get());
- EXPECT_FALSE(stub_fetcher_handler.fetcher());
+ EXPECT_TRUE(fetcher_);
+
+ run_loop.Run();
+ EXPECT_EQ(fetcher_.get(), stub_fetcher_handler.fetcher());
+ EXPECT_TRUE(stub_fetcher_handler.error_message().has_engaged());
}
TEST_F(FetcherFactoryTest, FileURLCannotConvertToFilePath) {
- StubFetcherHandler stub_fetcher_handler(NULL);
+ base::RunLoop run_loop;
+ StubFetcherHandler stub_fetcher_handler(&run_loop);
+
fetcher_ = fetcher_factory_.CreateFetcher(GURL("file://file.txt"),
&stub_fetcher_handler);
- EXPECT_FALSE(fetcher_.get());
- EXPECT_FALSE(stub_fetcher_handler.fetcher());
+ EXPECT_TRUE(fetcher_);
+
+ run_loop.Run();
+ EXPECT_EQ(fetcher_.get(), stub_fetcher_handler.fetcher());
+ EXPECT_TRUE(stub_fetcher_handler.error_message().has_engaged());
}
TEST_F(FetcherFactoryTest, MultipleCreations) {
diff --git a/src/cobalt/loader/image/image_data_decoder.h b/src/cobalt/loader/image/image_data_decoder.h
index 2d648b5..432942a 100644
--- a/src/cobalt/loader/image/image_data_decoder.h
+++ b/src/cobalt/loader/image/image_data_decoder.h
@@ -46,7 +46,7 @@
}
#if defined(STARBOARD)
-#if SB_API_VERSION >= 3 && SB_HAS(GRAPHICS)
+#if SB_HAS(GRAPHICS)
// Starboard version 3 adds support for hardware accelerated image decoding.
// In order to make use of this feature, subclasses of ImageDataDecoder may
// override this method in order to return an SbDecodeTarget, rather than a
@@ -59,7 +59,7 @@
virtual SbDecodeTarget RetrieveSbDecodeTarget() {
return kSbDecodeTargetInvalid;
}
-#endif // SB_API_VERSION >= 3 && SB_HAS(GRAPHICS)
+#endif // SB_HAS(GRAPHICS)
#endif // defined(STARBOARD)
void DecodeChunk(const uint8* data, size_t size);
diff --git a/src/cobalt/loader/image/image_decoder.cc b/src/cobalt/loader/image/image_decoder.cc
index 739c523..50f1699 100644
--- a/src/cobalt/loader/image/image_decoder.cc
+++ b/src/cobalt/loader/image/image_decoder.cc
@@ -29,9 +29,7 @@
#include "cobalt/loader/image/webp_image_decoder.h"
#include "net/base/mime_util.h"
#include "net/http/http_status_code.h"
-#if defined(STARBOARD)
#include "starboard/image.h"
-#endif
namespace cobalt {
namespace loader {
@@ -161,15 +159,13 @@
DCHECK(decoder_);
if (decoder_->FinishWithSuccess()) {
if (!decoder_->has_animation()) {
-#if defined(STARBOARD)
-#if SB_API_VERSION >= 3 && SB_HAS(GRAPHICS)
+#if SB_HAS(GRAPHICS)
SbDecodeTarget target = decoder_->RetrieveSbDecodeTarget();
if (SbDecodeTargetIsValid(target)) {
success_callback_.Run(new StaticImage(
resource_provider_->CreateImageFromSbDecodeTarget(target)));
} else // NOLINT
#endif
-#endif
{
scoped_ptr<render_tree::ImageData> image_data =
decoder_->RetrieveImageData();
@@ -271,8 +267,7 @@
}
namespace {
-#if defined(STARBOARD)
-#if SB_API_VERSION >= 3 && SB_HAS(GRAPHICS)
+#if SB_HAS(GRAPHICS)
const char* GetMimeTypeFromImageType(ImageDecoder::ImageType image_type) {
switch (image_type) {
case ImageDecoder::kImageTypeJPEG:
@@ -331,8 +326,7 @@
}
return scoped_ptr<ImageDataDecoder>();
}
-#endif // SB_API_VERSION >= 3 && SB_HAS(GRAPHICS)
-#endif // defined(STARBOARD)
+#endif // SB_HAS(GRAPHICS)
scoped_ptr<ImageDataDecoder> CreateImageDecoderFromImageType(
ImageDecoder::ImageType image_type,
@@ -383,12 +377,10 @@
image_type_ = DetermineImageType(signature_cache_.data);
}
-#if defined(STARBOARD)
-#if SB_API_VERSION >= 3 && SB_HAS(GRAPHICS)
+#if SB_HAS(GRAPHICS)
decoder_ =
MaybeCreateStarboardDecoder(mime_type_, image_type_, resource_provider_);
-#endif // SB_API_VERSION >= 3 && SB_HAS(GRAPHICS)
-#endif // defined(STARBOARD)
+#endif // SB_HAS(GRAPHICS)
if (!decoder_) {
decoder_ = CreateImageDecoderFromImageType(image_type_, resource_provider_);
diff --git a/src/cobalt/loader/image/image_decoder_starboard.cc b/src/cobalt/loader/image/image_decoder_starboard.cc
index a0518c0..1183eea 100644
--- a/src/cobalt/loader/image/image_decoder_starboard.cc
+++ b/src/cobalt/loader/image/image_decoder_starboard.cc
@@ -23,7 +23,7 @@
#include "starboard/decode_target.h"
#include "starboard/image.h"
-#if SB_API_VERSION >= 3 && SB_HAS(GRAPHICS)
+#if SB_TRUE && SB_HAS(GRAPHICS)
namespace cobalt {
namespace loader {
@@ -35,11 +35,11 @@
: ImageDataDecoder(resource_provider),
mime_type_(mime_type),
format_(format),
-#if SB_API_VERSION >= 4
+#if SB_TRUE
provider_(resource_provider->GetSbDecodeTargetGraphicsContextProvider()),
-#else // #if SB_API_VERSION >= 4
+#else // #if SB_TRUE
provider_(resource_provider->GetSbDecodeTargetProvider()),
-#endif // #if SB_API_VERSION >= 4
+#endif // #if SB_TRUE
target_(kSbDecodeTargetInvalid) {
TRACE_EVENT0("cobalt::loader::image",
"ImageDecoderStarboard::ImageDecoderStarboard()");
@@ -75,6 +75,6 @@
} // namespace loader
} // namespace cobalt
-#endif // SB_API_VERSION >= 3 && SB_HAS(GRAPHICS)
+#endif // SB_TRUE && SB_HAS(GRAPHICS)
#endif // #if defined(STARBOARD)
diff --git a/src/cobalt/loader/image/image_decoder_starboard.h b/src/cobalt/loader/image/image_decoder_starboard.h
index 39b6cbe..14e386d 100644
--- a/src/cobalt/loader/image/image_decoder_starboard.h
+++ b/src/cobalt/loader/image/image_decoder_starboard.h
@@ -15,8 +15,6 @@
#ifndef COBALT_LOADER_IMAGE_IMAGE_DECODER_STARBOARD_H_
#define COBALT_LOADER_IMAGE_IMAGE_DECODER_STARBOARD_H_
-#if defined(STARBOARD)
-
#include <string>
#include <vector>
@@ -25,7 +23,7 @@
#include "cobalt/loader/image/image_data_decoder.h"
#include "starboard/decode_target.h"
-#if SB_API_VERSION >= 3 && SB_HAS(GRAPHICS)
+#if SB_HAS(GRAPHICS)
namespace cobalt {
namespace loader {
@@ -51,11 +49,7 @@
const char* mime_type_;
SbDecodeTargetFormat format_;
std::vector<uint8> buffer_;
-#if SB_API_VERSION >= 4
SbDecodeTargetGraphicsContextProvider* provider_;
-#else // #if SB_API_VERSION >= 4
- SbDecodeTargetProvider* provider_;
-#endif // #if SB_API_VERSION >= 4
SbDecodeTarget target_;
};
@@ -63,8 +57,6 @@
} // namespace loader
} // namespace cobalt
-#endif // SB_API_VERSION >= 3 && SB_HAS(GRAPHICS)
-
-#endif // defined(STARBOARD)
+#endif // SB_HAS(GRAPHICS)
#endif // COBALT_LOADER_IMAGE_IMAGE_DECODER_STARBOARD_H_
diff --git a/src/cobalt/loader/loader.gyp b/src/cobalt/loader/loader.gyp
index 2fad2ec..6a07f01 100644
--- a/src/cobalt/loader/loader.gyp
+++ b/src/cobalt/loader/loader.gyp
@@ -28,29 +28,31 @@
'decoder.h',
'embedded_fetcher.cc',
'embedded_fetcher.h',
- 'fetcher.cc',
- 'fetcher.h',
+ 'error_fetcher.cc',
+ 'error_fetcher.h',
'fetcher_factory.cc',
'fetcher_factory.h',
+ 'fetcher.cc',
+ 'fetcher.h',
'file_fetcher.cc',
'file_fetcher.h',
'font/remote_typeface_cache.h',
'font/typeface_decoder.cc',
'font/typeface_decoder.h',
+ 'image/animated_image_tracker.cc',
+ 'image/animated_image_tracker.h',
'image/animated_webp_image.cc',
'image/animated_webp_image.h',
'image/dummy_gif_image_decoder.cc',
'image/dummy_gif_image_decoder.h',
- 'image/image.h',
'image/image_cache.h',
'image/image_data_decoder.cc',
'image/image_data_decoder.h',
- 'image/image_decoder.cc',
- 'image/image_decoder.h',
'image/image_decoder_starboard.cc',
'image/image_decoder_starboard.h',
- 'image/animated_image_tracker.cc',
- 'image/animated_image_tracker.h',
+ 'image/image_decoder.cc',
+ 'image/image_decoder.h',
+ 'image/image.h',
'image/jpeg_image_decoder.cc',
'image/jpeg_image_decoder.h',
'image/png_image_decoder.cc',
@@ -60,11 +62,11 @@
'image/threaded_image_decoder_proxy.h',
'image/webp_image_decoder.cc',
'image/webp_image_decoder.h',
- 'loader.cc',
- 'loader.h',
'loader_factory.cc',
'loader_factory.h',
'loader_types.h',
+ 'loader.cc',
+ 'loader.h',
'mesh/mesh_cache.h',
'mesh/mesh_decoder.cc',
'mesh/mesh_decoder.h',
diff --git a/src/cobalt/media/base/drm_system.h b/src/cobalt/media/base/drm_system.h
index 00dfa4a..b0a26ff 100644
--- a/src/cobalt/media/base/drm_system.h
+++ b/src/cobalt/media/base/drm_system.h
@@ -26,10 +26,6 @@
#include "base/optional.h"
#include "starboard/drm.h"
-#if SB_API_VERSION < 4
-#error "Cobalt media stack requires Starboard 4 or above."
-#endif // SB_API_VERSION < 4
-
namespace cobalt {
namespace media {
@@ -97,7 +93,7 @@
,
SessionUpdateKeyStatusesCallback update_key_statuses_callback
#endif // SB_API_VERSION >= SB_DRM_KEY_STATUSES_UPDATE_SUPPORT_API_VERSION
- );
+ ); // NOLINT(whitespace/parens)
void set_id(const std::string& id) { id_ = id; }
const SessionUpdateRequestGeneratedCallback&
update_request_generated_callback() const {
@@ -133,7 +129,7 @@
#if SB_API_VERSION >= SB_DRM_KEY_STATUSES_UPDATE_SUPPORT_API_VERSION
SessionUpdateKeyStatusesCallback session_update_key_statuses_callback
#endif // SB_API_VERSION >= SB_DRM_KEY_STATUSES_UPDATE_SUPPORT_API_VERSION
- );
+ ); // NOLINT(whitespace/parens)
private:
// Stores context of |GenerateSessionUpdateRequest|.
diff --git a/src/cobalt/media/base/shell_media_platform.h b/src/cobalt/media/base/shell_media_platform.h
index 73510fd..1e6c711 100644
--- a/src/cobalt/media/base/shell_media_platform.h
+++ b/src/cobalt/media/base/shell_media_platform.h
@@ -63,14 +63,10 @@
return NULL;
}
-#if SB_API_VERSION >= 4
virtual SbDecodeTargetGraphicsContextProvider*
GetSbDecodeTargetGraphicsContextProvider() {
return NULL;
}
-#elif SB_API_VERSION >= 3
- virtual SbDecodeTargetProvider* GetSbDecodeTargetProvider() { return NULL; }
-#endif // SB_API_VERSION >= 3
// This function is called before the decoder buffer leaves the demuxer and
// is being sent to the media pipeline for decrypting and decoding. The
diff --git a/src/cobalt/media/base/shell_video_frame_provider.h b/src/cobalt/media/base/shell_video_frame_provider.h
index a5bd969..e2b1569 100644
--- a/src/cobalt/media/base/shell_video_frame_provider.h
+++ b/src/cobalt/media/base/shell_video_frame_provider.h
@@ -49,9 +49,7 @@
ShellVideoFrameProvider() : output_mode_(kOutputModeInvalid) {}
-#if SB_API_VERSION >= 4
typedef base::Callback<SbDecodeTarget()> GetCurrentSbDecodeTargetFunction;
-#endif // SB_API_VERSION >= 4
scoped_refptr<VideoFrame> GetCurrentFrame() { return NULL; }
@@ -65,7 +63,6 @@
return output_mode_;
}
-#if SB_API_VERSION >= 4
// For Starboard platforms that have a decode-to-texture player, we enable
// this ShellVideoFrameProvider to act as a bridge for Cobalt code to query
// for the current SbDecodeTarget. In effect, we bypass all of
@@ -91,15 +88,12 @@
return get_current_sb_decode_target_function_.Run();
}
}
-#endif // SB_API_VERSION >= 4
private:
mutable base::Lock lock_;
OutputMode output_mode_;
-#if SB_API_VERSION >= 4
GetCurrentSbDecodeTargetFunction get_current_sb_decode_target_function_;
-#endif // SB_API_VERSION >= 4
DISALLOW_COPY_AND_ASSIGN(ShellVideoFrameProvider);
};
diff --git a/src/cobalt/media/base/starboard_player.cc b/src/cobalt/media/base/starboard_player.cc
index cb8749c..9cf382a 100644
--- a/src/cobalt/media/base/starboard_player.cc
+++ b/src/cobalt/media/base/starboard_player.cc
@@ -96,11 +96,9 @@
DCHECK(host_);
DCHECK(set_bounds_helper_);
-#if SB_API_VERSION >= 4
output_mode_ = ComputeSbPlayerOutputMode(
MediaVideoCodecToSbMediaVideoCodec(video_config.codec()), drm_system,
prefer_decode_to_texture);
-#endif // SB_API_VERSION >= 4
CreatePlayer();
@@ -118,11 +116,9 @@
ShellMediaPlatform::Instance()->GetVideoFrameProvider()->SetOutputMode(
ShellVideoFrameProvider::kOutputModeInvalid);
-#if SB_API_VERSION >= 4
ShellMediaPlatform::Instance()
->GetVideoFrameProvider()
->ResetGetCurrentSbDecodeTargetFunction();
-#endif // SB_API_VERSION >= 4
if (SbPlayerIsValid(player_)) {
SbPlayerDestroy(player_);
@@ -211,13 +207,9 @@
}
DCHECK(SbPlayerIsValid(player_));
-#if SB_API_VERSION >= 4
const int kZIndex = 0;
SbPlayerSetBounds(player_, kZIndex, rect.x(), rect.y(), rect.width(),
rect.height());
-#else // SB_API_VERSION >= 4
- SbPlayerSetBounds(player_, rect.x(), rect.y(), rect.width(), rect.height());
-#endif // SB_API_VERSION >= 4
}
void StarboardPlayer::PrepareForSeek() {
@@ -231,11 +223,7 @@
}
++ticket_;
-#if SB_API_VERSION < 4
- SbPlayerSetPause(player_, true);
-#else // SB_API_VERSION < 4
SbPlayerSetPlaybackRate(player_, 0.f);
-#endif // SB_API_VERSION < 4
}
void StarboardPlayer::Seek(base::TimeDelta time) {
@@ -260,11 +248,7 @@
++ticket_;
SbPlayerSeek(player_, TimeDeltaToSbMediaTime(time), ticket_);
seek_pending_ = false;
-#if SB_API_VERSION < 4
- SbPlayerSetPause(player_, playback_rate_ == 0.0);
-#else // SB_API_VERSION < 4
SbPlayerSetPlaybackRate(player_, playback_rate_);
-#endif // SB_API_VERSION < 4
}
void StarboardPlayer::SetVolume(float volume) {
@@ -295,11 +279,7 @@
return;
}
-#if SB_API_VERSION < 4
- SbPlayerSetPause(player_, playback_rate == 0.0);
-#else // SB_API_VERSION < 4
SbPlayerSetPlaybackRate(player_, playback_rate);
-#endif // SB_API_VERSION < 4
}
void StarboardPlayer::GetInfo(uint32* video_frames_decoded,
@@ -349,11 +329,7 @@
DCHECK(SbPlayerIsValid(player_));
-#if SB_API_VERSION < 4
- SbPlayerSetPause(player_, true);
-#else // SB_API_VERSION < 4
SbPlayerSetPlaybackRate(player_, 0.0);
-#endif // SB_API_VERSION < 4
base::AutoLock auto_lock(lock_);
@@ -391,7 +367,6 @@
}
namespace {
-#if SB_API_VERSION >= 4
ShellVideoFrameProvider::OutputMode ToVideoFrameProviderOutputMode(
SbPlayerOutputMode output_mode) {
switch (output_mode) {
@@ -406,7 +381,6 @@
NOTREACHED();
return ShellVideoFrameProvider::kOutputModeInvalid;
}
-#endif // #if SB_API_VERSION >= 4
} // namespace
void StarboardPlayer::CreatePlayer() {
@@ -446,26 +420,17 @@
SbMediaVideoCodec video_codec =
MediaVideoCodecToSbMediaVideoCodec(video_config_.codec());
-#if SB_API_VERSION >= 4
DCHECK(SbPlayerOutputModeSupported(output_mode_, video_codec, drm_system_));
-#endif // SB_API_VERSION >= 4
- player_ = SbPlayerCreate(
- window_, video_codec, audio_codec, SB_PLAYER_NO_DURATION, drm_system_,
- &audio_header, &StarboardPlayer::DeallocateSampleCB,
- &StarboardPlayer::DecoderStatusCB, &StarboardPlayer::PlayerStatusCB, this
-#if SB_API_VERSION >= 4
- ,
- output_mode_,
- ShellMediaPlatform::Instance()->GetSbDecodeTargetGraphicsContextProvider()
-#elif SB_API_VERSION >= 3
- ,
- ShellMediaPlatform::Instance()->GetSbDecodeTargetProvider() // provider
-#endif // SB_API_VERSION >= 3
- );
+ player_ = SbPlayerCreate(window_, video_codec, audio_codec,
+ SB_PLAYER_NO_DURATION, drm_system_, &audio_header,
+ &StarboardPlayer::DeallocateSampleCB,
+ &StarboardPlayer::DecoderStatusCB,
+ &StarboardPlayer::PlayerStatusCB, this, output_mode_,
+ ShellMediaPlatform::Instance()
+ ->GetSbDecodeTargetGraphicsContextProvider());
DCHECK(SbPlayerIsValid(player_));
-#if SB_API_VERSION >= 4
if (output_mode_ == kSbPlayerOutputModeDecodeToTexture) {
// If the player is setup to decode to texture, then provide Cobalt with
// a method of querying that texture.
@@ -477,10 +442,6 @@
}
ShellMediaPlatform::Instance()->GetVideoFrameProvider()->SetOutputMode(
ToVideoFrameProviderOutputMode(output_mode_));
-#else // SB_API_VERSION >= 4
- ShellMediaPlatform::Instance()->GetVideoFrameProvider()->SetOutputMode(
- ShellVideoFrameProvider::kOutputModePunchOut);
-#endif // SB_API_VERSION >= 4
set_bounds_helper_->SetPlayer(this);
@@ -490,14 +451,13 @@
}
}
-#if SB_API_VERSION >= 4
SbDecodeTarget StarboardPlayer::GetCurrentSbDecodeTarget() {
return SbPlayerGetCurrentFrame(player_);
}
+
SbPlayerOutputMode StarboardPlayer::GetSbPlayerOutputMode() {
return output_mode_;
}
-#endif // SB_API_VERSION >= 4
void StarboardPlayer::ClearDecoderBufferCache() {
DCHECK(message_loop_->BelongsToCurrentThread());
@@ -572,11 +532,7 @@
}
SbPlayerSeek(player_, TimeDeltaToSbMediaTime(preroll_timestamp_), ticket_);
SetVolume(volume_);
-#if SB_API_VERSION < 4
- SbPlayerSetPause(player_, playback_rate_ == 0.0);
-#else // SB_API_VERSION < 4
SbPlayerSetPlaybackRate(player_, playback_rate_);
-#endif // SB_API_VERSION < 4
return;
}
host_->OnPlayerStatus(state);
@@ -628,7 +584,6 @@
helper->callback_helper_, sample_buffer));
}
-#if SB_API_VERSION >= 4
// static
SbPlayerOutputMode StarboardPlayer::ComputeSbPlayerOutputMode(
SbMediaVideoCodec codec, SbDrmSystem drm_system,
@@ -658,7 +613,6 @@
return output_mode;
}
-#endif // SB_API_VERSION >= 4
} // namespace media
} // namespace cobalt
diff --git a/src/cobalt/media/base/starboard_player.h b/src/cobalt/media/base/starboard_player.h
index d2c02f1..ad1a31a 100644
--- a/src/cobalt/media/base/starboard_player.h
+++ b/src/cobalt/media/base/starboard_player.h
@@ -73,10 +73,8 @@
void Suspend();
void Resume();
-#if SB_API_VERSION >= 4
SbDecodeTarget GetCurrentSbDecodeTarget();
SbPlayerOutputMode GetSbPlayerOutputMode();
-#endif // SB_API_VERSION >= 4
private:
enum State {
@@ -127,13 +125,11 @@
static void DeallocateSampleCB(SbPlayer player, void* context,
const void* sample_buffer);
-#if SB_API_VERSION >= 4
// Returns the output mode that should be used for a video with the given
// specifications.
static SbPlayerOutputMode ComputeSbPlayerOutputMode(
SbMediaVideoCodec codec, SbDrmSystem drm_system,
bool prefer_decode_to_texture);
-#endif // SB_API_VERSION >= 4
// The following variables are initialized in the ctor and never changed.
const scoped_refptr<base::MessageLoopProxy> message_loop_;
@@ -171,10 +167,8 @@
uint32 cached_video_frames_dropped_;
base::TimeDelta preroll_timestamp_;
-#if SB_API_VERSION >= 4
// Keep track of the output mode we are supposed to output to.
SbPlayerOutputMode output_mode_;
-#endif // SB_API_VERSION >= 4
};
} // namespace media
diff --git a/src/cobalt/media/base/starboard_utils.cc b/src/cobalt/media/base/starboard_utils.cc
index 72c4c72..bc6905a 100644
--- a/src/cobalt/media/base/starboard_utils.cc
+++ b/src/cobalt/media/base/starboard_utils.cc
@@ -128,7 +128,6 @@
}
}
-#if SB_API_VERSION >= 4
// Ensure that the enums in starboard/media.h match enums in gfx::ColorSpace.
#define ENUM_EQ(a, b) \
COMPILE_ASSERT(static_cast<int>(a) == static_cast<int>(b), mismatching_enums)
@@ -279,7 +278,6 @@
return sb_media_color_metadata;
}
-#endif // SB_API_VERSION >= 3
} // namespace media
} // namespace cobalt
diff --git a/src/cobalt/media/base/starboard_utils.h b/src/cobalt/media/base/starboard_utils.h
index 0cb43c1..04196b5 100644
--- a/src/cobalt/media/base/starboard_utils.h
+++ b/src/cobalt/media/base/starboard_utils.h
@@ -39,10 +39,8 @@
SbDrmSampleInfo* drm_info,
SbDrmSubSampleMapping* subsample_mapping);
-#if SB_API_VERSION >= 4
SbMediaColorMetadata MediaToSbMediaColorMetadata(
const WebMColorMetadata& webm_color_metadata);
-#endif
} // namespace media
} // namespace cobalt
diff --git a/src/cobalt/media/player/web_media_player.h b/src/cobalt/media/player/web_media_player.h
index b3b5cb6..beee2aa 100644
--- a/src/cobalt/media/player/web_media_player.h
+++ b/src/cobalt/media/player/web_media_player.h
@@ -194,7 +194,7 @@
public:
virtual void NetworkStateChanged() = 0;
virtual void ReadyStateChanged() = 0;
- virtual void TimeChanged() = 0;
+ virtual void TimeChanged(bool eos_played) = 0;
virtual void DurationChanged() = 0;
virtual void OutputModeChanged() = 0;
virtual void PlaybackStateChanged() = 0;
diff --git a/src/cobalt/media/player/web_media_player_impl.cc b/src/cobalt/media/player/web_media_player_impl.cc
index bd0d14b..c8cf80a 100644
--- a/src/cobalt/media/player/web_media_player_impl.cc
+++ b/src/cobalt/media/player/web_media_player_impl.cc
@@ -590,7 +590,8 @@
// Update our paused time.
if (state_.paused) state_.paused_time = pipeline_->GetMediaTime();
- GetClient()->TimeChanged();
+ const bool eos_played = false;
+ GetClient()->TimeChanged(eos_played);
}
void WebMediaPlayerImpl::OnPipelineEnded(PipelineStatus status) {
@@ -599,7 +600,9 @@
OnPipelineError(status);
return;
}
- GetClient()->TimeChanged();
+
+ const bool eos_played = true;
+ GetClient()->TimeChanged(eos_played);
}
void WebMediaPlayerImpl::OnPipelineError(PipelineStatus error) {
diff --git a/src/cobalt/media/sandbox/web_media_player_helper.cc b/src/cobalt/media/sandbox/web_media_player_helper.cc
index 58618a2..1909a5c 100644
--- a/src/cobalt/media/sandbox/web_media_player_helper.cc
+++ b/src/cobalt/media/sandbox/web_media_player_helper.cc
@@ -35,7 +35,7 @@
// WebMediaPlayerClient methods
void NetworkStateChanged() OVERRIDE {}
void ReadyStateChanged() OVERRIDE {}
- void TimeChanged() OVERRIDE {}
+ void TimeChanged(bool) OVERRIDE {}
void DurationChanged() OVERRIDE {}
void OutputModeChanged() OVERRIDE {}
void PlaybackStateChanged() OVERRIDE {}
@@ -48,9 +48,8 @@
#endif // defined(COBALT_MEDIA_SOURCE_2016)
std::string SourceURL() const OVERRIDE { return ""; }
#if defined(COBALT_MEDIA_SOURCE_2016)
- void EncryptedMediaInitDataEncountered(EmeInitDataType init_data_type,
- const unsigned char* init_data,
- unsigned init_data_length) OVERRIDE {}
+ void EncryptedMediaInitDataEncountered(EmeInitDataType, const unsigned char*,
+ unsigned) OVERRIDE {}
#endif // defined(COBALT_MEDIA_SOURCE_2016)
};
diff --git a/src/cobalt/media/shell_media_platform_starboard.h b/src/cobalt/media/shell_media_platform_starboard.h
index 0119293..8bb5c4c 100644
--- a/src/cobalt/media/shell_media_platform_starboard.h
+++ b/src/cobalt/media/shell_media_platform_starboard.h
@@ -44,7 +44,6 @@
return video_frame_provider_;
}
-#if SB_API_VERSION >= 4
SbDecodeTargetGraphicsContextProvider*
GetSbDecodeTargetGraphicsContextProvider() OVERRIDE {
#if SB_HAS(GRAPHICS)
@@ -53,15 +52,6 @@
return NULL;
#endif // SB_HAS(GRAPHICS)
}
-#elif SB_API_VERSION >= 3
- SbDecodeTargetProvider* GetSbDecodeTargetProvider() OVERRIDE {
-#if SB_HAS(GRAPHICS)
- return resource_provider_->GetSbDecodeTargetProvider();
-#else // SB_HAS(GRAPHICS)
- return NULL;
-#endif // SB_HAS(GRAPHICS)
- }
-#endif // SB_API_VERSION >= 4
void Suspend() OVERRIDE { resource_provider_ = NULL; }
void Resume(render_tree::ResourceProvider* resource_provider) OVERRIDE {
@@ -134,7 +124,6 @@
const scoped_refptr<DecoderBuffer>& buffer) OVERRIDE;
bool IsOutputProtected() OVERRIDE;
-#if SB_API_VERSION >= 4
SbDecodeTargetGraphicsContextProvider*
GetSbDecodeTargetGraphicsContextProvider() OVERRIDE {
#if SB_HAS(GRAPHICS)
@@ -143,15 +132,6 @@
return NULL;
#endif // SB_HAS(GRAPHICS)
}
-#elif SB_API_VERSION >= 3
- virtual SbDecodeTargetProvider* GetSbDecodeTargetProvider() {
-#if SB_HAS(GRAPHICS)
- return resource_provider_->GetSbDecodeTargetProvider();
-#else // SB_HAS(GRAPHICS)
- return NULL;
-#endif // SB_HAS(GRAPHICS)
- }
-#endif // SB_API_VERSION >= 4
void Suspend() OVERRIDE { resource_provider_ = NULL; }
void Resume(
diff --git a/src/cobalt/network/local_network.cc b/src/cobalt/network/local_network.cc
index 725f3e4..964a54e 100644
--- a/src/cobalt/network/local_network.cc
+++ b/src/cobalt/network/local_network.cc
@@ -23,7 +23,6 @@
namespace {
-#if SB_API_VERSION >= 4
bool CompareNBytesOfAddress(const SbSocketAddress& ip,
const SbSocketAddress& source_address,
const SbSocketAddress& netmask,
@@ -57,22 +56,15 @@
}
}
-#endif // SB_API_VERSION >= 4
-
} // namespace
bool IsIPInLocalNetwork(const SbSocketAddress& destination) {
-#if SB_API_VERSION >= 4
SbSocketAddress source_address;
SbSocketAddress netmask;
if (!(SbSocketGetInterfaceAddress(&destination, &source_address, &netmask))) {
return false;
}
return IsLocalIP(destination, source_address, netmask);
-#else
- UNREFERENCED_PARAMETER(destination);
- return false;
-#endif
}
bool IsIPInPrivateRange(const SbSocketAddress& ip) {
diff --git a/src/cobalt/network/starboard/user_agent_string_factory_starboard.cc b/src/cobalt/network/starboard/user_agent_string_factory_starboard.cc
index c82b19d..3487dd7 100644
--- a/src/cobalt/network/starboard/user_agent_string_factory_starboard.cc
+++ b/src/cobalt/network/starboard/user_agent_string_factory_starboard.cc
@@ -41,9 +41,7 @@
case kSbSystemDeviceTypeOverTheTopBox:
case kSbSystemDeviceTypeSetTopBox:
case kSbSystemDeviceTypeTV:
-#if SB_API_VERSION >= 4
case kSbSystemDeviceTypeAndroidTV:
-#endif // SB_API_VERSION >= 4
return true;
case kSbSystemDeviceTypeDesktopPC:
case kSbSystemDeviceTypeUnknown:
@@ -107,11 +105,9 @@
case kSbSystemDeviceTypeTV:
youtube_tv_info_->device_type = YouTubeTVInfo::kTV;
break;
-#if SB_API_VERSION >= 4
case kSbSystemDeviceTypeAndroidTV:
youtube_tv_info_->device_type = YouTubeTVInfo::kAndroidTV;
break;
-#endif // SB_API_VERSION >= 4
case kSbSystemDeviceTypeDesktopPC:
default:
youtube_tv_info_->device_type = YouTubeTVInfo::kInvalidDeviceType;
diff --git a/src/cobalt/network/user_agent_string_factory.cc b/src/cobalt/network/user_agent_string_factory.cc
index 7ad310b..8bd7815 100644
--- a/src/cobalt/network/user_agent_string_factory.cc
+++ b/src/cobalt/network/user_agent_string_factory.cc
@@ -39,6 +39,28 @@
#error Unknown build configuration.
#endif
+struct SanitizeReplacements {
+ const char* replace_chars;
+ const char* replace_with;
+} kSanitizeReplacements[] = {
+ { ",", u8"\uFF0C" }, // fullwidth comma
+ { "_", u8"\u2E0F" }, // paragraphos
+ { "/", u8"\u2215" }, // division slash
+ { "(", u8"\uFF08" }, // fullwidth left paren
+ { ")", u8"\uFF09" }, // fullwidth right paren
+};
+
+// Replace reserved characters with Unicode homoglyphs
+std::string Sanitize(const std::string& str) {
+ std::string clean(str);
+ for (size_t i=0; i < arraysize(kSanitizeReplacements); i++) {
+ const SanitizeReplacements* replacement = kSanitizeReplacements + i;
+ ReplaceChars(
+ clean, replacement->replace_chars, replacement->replace_with, &clean);
+ }
+ return clean;
+}
+
} // namespace
std::string UserAgentStringFactory::CreateUserAgentString() {
@@ -66,11 +88,12 @@
if (youtube_tv_info_) {
base::StringAppendF(
&user_agent, ", %s_%s_%s/%s (%s, %s, %s)",
- youtube_tv_info_->network_operator.value_or("").c_str(),
+ Sanitize(youtube_tv_info_->network_operator.value_or("")).c_str(),
CreateDeviceTypeString().c_str(),
- youtube_tv_info_->chipset_model_number.value_or("").c_str(),
- youtube_tv_info_->firmware_version.value_or("").c_str(),
- youtube_tv_info_->brand.c_str(), youtube_tv_info_->model.c_str(),
+ Sanitize(youtube_tv_info_->chipset_model_number.value_or("")).c_str(),
+ Sanitize(youtube_tv_info_->firmware_version.value_or("")).c_str(),
+ Sanitize(youtube_tv_info_->brand).c_str(),
+ Sanitize(youtube_tv_info_->model).c_str(),
CreateConnectionTypeString().c_str());
}
@@ -112,10 +135,8 @@
return "STB";
case YouTubeTVInfo::kTV:
return "TV";
-#if SB_API_VERSION >= 4
case YouTubeTVInfo::kAndroidTV:
return "ATV";
-#endif // SB_API_VERSION >= 4
case YouTubeTVInfo::kInvalidDeviceType:
default:
NOTREACHED();
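The Sanitize() helper added above keeps platform-reported fields (network operator, chipset, firmware, brand, model) from breaking the comma/underscore/slash/parenthesis-delimited grammar of the user-agent suffix by swapping those reserved characters for look-alike Unicode code points. The snippet below is a self-contained sketch of the same idea using only the standard library; ReplaceAnyOf and SanitizeSketch are illustrative stand-ins for the patch's use of base's ReplaceChars and arraysize, not the shipped code.

  #include <iostream>
  #include <string>

  // Illustrative stand-in for base::ReplaceChars(): every character that
  // appears in |from| is replaced by the UTF-8 sequence |to|.
  std::string ReplaceAnyOf(std::string input, const std::string& from,
                           const std::string& to) {
    for (std::string::size_type pos = input.find_first_of(from);
         pos != std::string::npos;
         pos = input.find_first_of(from, pos + to.size())) {
      input.replace(pos, 1, to);
    }
    return input;
  }

  // Same reserved-character -> homoglyph table as the patch above.
  std::string SanitizeSketch(std::string field) {
    static const struct {
      const char* replace_chars;
      const char* replace_with;
    } kReplacements[] = {
        {",", u8"\uFF0C"},  // fullwidth comma
        {"_", u8"\u2E0F"},  // paragraphos
        {"/", u8"\u2215"},  // division slash
        {"(", u8"\uFF08"},  // fullwidth left parenthesis
        {")", u8"\uFF09"},  // fullwidth right parenthesis
    };
    for (const auto& r : kReplacements) {
      field = ReplaceAnyOf(field, r.replace_chars, r.replace_with);
    }
    return field;
  }

  int main() {
    // Underscores, slashes, and parentheses are structural in the UA suffix,
    // so they must not survive inside platform-provided fields.
    std::cout << SanitizeSketch("Aperture_Science (Labs), P-body/Atlas") << "\n";
    return 0;
  }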
diff --git a/src/cobalt/network/user_agent_string_factory.h b/src/cobalt/network/user_agent_string_factory.h
index 5869855..7f7686f 100644
--- a/src/cobalt/network/user_agent_string_factory.h
+++ b/src/cobalt/network/user_agent_string_factory.h
@@ -47,9 +47,7 @@
struct YouTubeTVInfo {
enum DeviceType {
kInvalidDeviceType,
-#if SB_API_VERSION >= 4
kAndroidTV,
-#endif // SB_API_VERSION >= 4
kBlueRayDiskPlayer,
kGameConsole,
kOverTheTopBox,
diff --git a/src/cobalt/network/user_agent_string_factory_test.cc b/src/cobalt/network/user_agent_string_factory_test.cc
index 6e3a669..80171b8 100644
--- a/src/cobalt/network/user_agent_string_factory_test.cc
+++ b/src/cobalt/network/user_agent_string_factory_test.cc
@@ -82,23 +82,35 @@
class UserAgentStringFactoryWithYouTubeTVInfo : public UserAgentStringFactory {
public:
UserAgentStringFactoryWithYouTubeTVInfo() {
+ // There are deliberately a variety of underscores, commas, slashes, and
+ // parentheses in the strings below to ensure they get sanitized.
os_name_and_version_ = "GLaDOS 3.11";
youtube_tv_info_ = YouTubeTVInfo();
- youtube_tv_info_->network_operator = "ApertureLaboratories";
+ youtube_tv_info_->network_operator = "Aperture_Science_Innovators";
youtube_tv_info_->device_type = YouTubeTVInfo::kOverTheTopBox;
- youtube_tv_info_->chipset_model_number = "Wheatley";
- youtube_tv_info_->firmware_version = "0.01";
- youtube_tv_info_->brand = "Aperture Science";
+ youtube_tv_info_->chipset_model_number = "P-body/Orange_Atlas/Blue";
+ youtube_tv_info_->firmware_version = "0,01";
+ youtube_tv_info_->brand = "Aperture Science (Labs)";
youtube_tv_info_->model = "GLaDOS";
}
};
+// Look-alike replacements expected from sanitizing fields
+#define COMMA u8"\uFF0C" // fullwidth comma
+#define UNDER u8"\u2E0F" // paragraphos
+#define SLASH u8"\u2215" // division slash
+#define LPAREN u8"\uFF08" // fullwidth left paren
+#define RPAREN u8"\uFF09" // fullwidth right paren
+
TEST(UserAgentStringFactoryTest, WithYouTubeTVInfo) {
std::string user_agent_string =
UserAgentStringFactoryWithYouTubeTVInfo().CreateUserAgentString();
- EXPECT_NE(std::string::npos,
- user_agent_string.find("ApertureLaboratories_OTT_Wheatley/0.01 "
- "(Aperture Science, GLaDOS, )"));
+ const char* tv_info_str =
+ "Aperture" UNDER "Science" UNDER "Innovators"
+ "_OTT_"
+ "P-body" SLASH "Orange" UNDER "Atlas" SLASH "Blue"
+ "/0" COMMA "01 (Aperture Science " LPAREN "Labs" RPAREN ", GLaDOS, )";
+ EXPECT_NE(std::string::npos, user_agent_string.find(tv_info_str));
}
class UserAgentStringFactoryWithWiredConnection
diff --git a/src/cobalt/render_tree/mock_resource_provider.h b/src/cobalt/render_tree/mock_resource_provider.h
index 1d13b6c..d30b9fc 100644
--- a/src/cobalt/render_tree/mock_resource_provider.h
+++ b/src/cobalt/render_tree/mock_resource_provider.h
@@ -94,23 +94,17 @@
#if SB_HAS(GRAPHICS)
-#if SB_API_VERSION >= 3
scoped_refptr<Image> CreateImageFromSbDecodeTarget(SbDecodeTarget target) {
UNREFERENCED_PARAMETER(target);
return NULL;
}
bool SupportsSbDecodeTarget() { return false; }
-#endif // SB_API_VERSION >= 3
-#if SB_API_VERSION >= 4
SbDecodeTargetGraphicsContextProvider*
GetSbDecodeTargetGraphicsContextProvider() {
return NULL;
}
-#elif SB_API_VERSION >= 3
- SbDecodeTargetProvider* GetSbDecodeTargetProvider() { return NULL; }
-#endif // SB_API_VERSION >= 4
#endif // SB_HAS(GRAPHICS)
diff --git a/src/cobalt/render_tree/resource_provider.h b/src/cobalt/render_tree/resource_provider.h
index 4a648d1..acbbb21 100644
--- a/src/cobalt/render_tree/resource_provider.h
+++ b/src/cobalt/render_tree/resource_provider.h
@@ -27,9 +27,7 @@
#include "cobalt/render_tree/mesh.h"
#include "cobalt/render_tree/node.h"
#include "cobalt/render_tree/typeface.h"
-#if defined(STARBOARD)
#include "starboard/decode_target.h"
-#endif // defined(STARBOARD)
namespace cobalt {
namespace render_tree {
@@ -76,7 +74,6 @@
scoped_ptr<ImageData> pixel_data) = 0;
#if SB_HAS(GRAPHICS)
-#if SB_API_VERSION >= 3
// This function will consume an SbDecodeTarget object produced by
// SbDecodeTargetCreate(), wrap it in a render_tree::Image that can be used
// in a render tree, and return it to the caller.
@@ -85,19 +82,12 @@
// Whether SbDecodeTargetIsSupported or not.
virtual bool SupportsSbDecodeTarget() = 0;
-#endif // SB_API_VERSION >= 3
-#if SB_API_VERSION >= 4
// Return the SbDecodeTargetGraphicsContextProvider associated with the
// ResourceProvider, if it exists. Returns NULL if SbDecodeTarget is not
// supported.
virtual SbDecodeTargetGraphicsContextProvider*
GetSbDecodeTargetGraphicsContextProvider() = 0;
-#elif SB_API_VERSION >= 3
- // Return the associated SbDecodeTargetProvider with the ResourceProvider,
- // if it exists. Returns NULL if SbDecodeTarget is not supported.
- virtual SbDecodeTargetProvider* GetSbDecodeTargetProvider() = 0;
-#endif // SB__API_VERSION >= 4
#endif // SB_HAS(GRAPHICS)
// Returns a raw chunk of memory that can later be passed into a function like
diff --git a/src/cobalt/render_tree/resource_provider_stub.h b/src/cobalt/render_tree/resource_provider_stub.h
index aee3f69..397d8cb 100644
--- a/src/cobalt/render_tree/resource_provider_stub.h
+++ b/src/cobalt/render_tree/resource_provider_stub.h
@@ -247,30 +247,22 @@
return make_scoped_refptr(new ImageStub(skia_source_data.Pass()));
}
-#if SB_API_VERSION >= 3 && SB_HAS(GRAPHICS)
+#if SB_HAS(GRAPHICS)
scoped_refptr<Image> CreateImageFromSbDecodeTarget(
SbDecodeTarget decode_target) OVERRIDE {
NOTREACHED();
-#if SB_API_VERSION < 4
- SbDecodeTargetDestroy(decode_target);
-#else // 4
SbDecodeTargetRelease(decode_target);
-#endif // 4
return NULL;
}
bool SupportsSbDecodeTarget() OVERRIDE { return false; }
-#endif // SB_API_VERSION >= 3 && SB_HAS(GRAPHICS)
+#endif // SB_HAS(GRAPHICS)
#if SB_HAS(GRAPHICS)
-#if SB_API_VERSION >= 4
SbDecodeTargetGraphicsContextProvider*
GetSbDecodeTargetGraphicsContextProvider() OVERRIDE {
return NULL;
}
-#elif SB_API_VERSION >= 3
- SbDecodeTargetProvider* GetSbDecodeTargetProvider() OVERRIDE { return NULL; }
-#endif // SB_API_VERSION >= 4
#endif // SB_HAS(GRAPHICS)
scoped_ptr<RawImageMemory> AllocateRawImageMemory(size_t size_in_bytes,
diff --git a/src/cobalt/render_tree/rounded_corners.cc b/src/cobalt/render_tree/rounded_corners.cc
index b0a3331..7eacb19 100644
--- a/src/cobalt/render_tree/rounded_corners.cc
+++ b/src/cobalt/render_tree/rounded_corners.cc
@@ -17,6 +17,17 @@
namespace cobalt {
namespace render_tree {
+RoundedCorners RoundedCorners::Scale(float sx, float sy) const {
+ return RoundedCorners(RoundedCorner(top_left.horizontal * sx,
+ top_left.vertical * sy),
+ RoundedCorner(top_right.horizontal * sx,
+ top_right.vertical * sy),
+ RoundedCorner(bottom_right.horizontal * sx,
+ bottom_right.vertical * sy),
+ RoundedCorner(bottom_left.horizontal * sx,
+ bottom_left.vertical * sy));
+}
+
RoundedCorners RoundedCorners::Normalize(const math::RectF& rect) const {
float scale = 1.0f;
float size;
@@ -48,14 +59,7 @@
scale = std::min(rect.height() / size, scale);
}
- return RoundedCorners(RoundedCorner(top_left.horizontal * scale,
- top_left.vertical * scale),
- RoundedCorner(top_right.horizontal * scale,
- top_right.vertical * scale),
- RoundedCorner(bottom_right.horizontal * scale,
- bottom_right.vertical * scale),
- RoundedCorner(bottom_left.horizontal * scale,
- bottom_left.vertical * scale));
+ return Scale(scale, scale);
}
bool RoundedCorners::IsNormalized(const math::RectF& rect) const {
diff --git a/src/cobalt/render_tree/rounded_corners.h b/src/cobalt/render_tree/rounded_corners.h
index cf1d03e..7646f65 100644
--- a/src/cobalt/render_tree/rounded_corners.h
+++ b/src/cobalt/render_tree/rounded_corners.h
@@ -91,6 +91,8 @@
return Inset(insets.left(), insets.top(), insets.right(), insets.bottom());
}
+ RoundedCorners Scale(float sx, float sy) const;
+
// Ensure the rounded corners' radii do not exceed the length of the
// corresponding edge of the given rect.
RoundedCorners Normalize(const math::RectF& rect) const;
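RoundedCorners::Scale(sx, sy) declared here multiplies each corner's horizontal radius by sx and each vertical radius by sy; Normalize() in the .cc change above now reuses it for the uniform case, and the EGL draw objects later in this patch use it to pre-scale radii after pulling scale out of the transform. A minimal sketch of the operation with simplified stand-in structs (Corner/Corners below are illustrative, not the real render_tree types):

  #include <iostream>

  // Simplified stand-ins for render_tree::RoundedCorner(s); the real classes
  // carry helpers such as Inset() and Normalize() in addition to Scale().
  struct Corner {
    float horizontal;
    float vertical;
  };

  struct Corners {
    Corner top_left, top_right, bottom_right, bottom_left;

    // Mirrors RoundedCorners::Scale(): x radii scale by |sx|, y radii by |sy|.
    Corners Scale(float sx, float sy) const {
      return Corners{{top_left.horizontal * sx, top_left.vertical * sy},
                     {top_right.horizontal * sx, top_right.vertical * sy},
                     {bottom_right.horizontal * sx, bottom_right.vertical * sy},
                     {bottom_left.horizontal * sx, bottom_left.vertical * sy}};
    }
  };

  int main() {
    Corners corners = {{8.0f, 8.0f}, {8.0f, 8.0f}, {8.0f, 8.0f}, {8.0f, 8.0f}};
    // Scaling a rect by (2, 0.5) must scale its corner radii the same way,
    // otherwise the rounded corners no longer match the rect's edges.
    Corners scaled = corners.Scale(2.0f, 0.5f);
    std::cout << scaled.top_left.horizontal << "x" << scaled.top_left.vertical
              << "\n";  // prints 16x4
    return 0;
  }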
diff --git a/src/cobalt/renderer/glimp_shaders/glsl/fragment_skia_texture_masked_texture_domain.glsl b/src/cobalt/renderer/glimp_shaders/glsl/fragment_skia_texture_masked_texture_domain.glsl
new file mode 100644
index 0000000..168df28
--- /dev/null
+++ b/src/cobalt/renderer/glimp_shaders/glsl/fragment_skia_texture_masked_texture_domain.glsl
@@ -0,0 +1,33 @@
+#version 100
+precision mediump float;
+uniform sampler2D uSampler0_Stage0;
+uniform sampler2D uSampler0_Stage1;
+uniform vec4 uTexDom_Stage1;
+varying vec4 vColor;
+varying vec2 vMatrixCoord_Stage0;
+varying vec2 vMatrixCoord_Stage1;
+
+void main()
+{
+ vec4 output_Stage0;
+ {
+ // Stage 0: Texture
+ output_Stage0 =
+ (vColor * texture2D(uSampler0_Stage0, vMatrixCoord_Stage0).aaaa);
+ }
+ vec4 output_Stage1;
+ {
+ // Stage 1: TextureDomain
+ {
+ bvec4 outside;
+ outside.xy = lessThan(vMatrixCoord_Stage1, uTexDom_Stage1.xy);
+ outside.zw = greaterThan(vMatrixCoord_Stage1, uTexDom_Stage1.zw);
+ output_Stage1 =
+ any(outside) ?
+ vec4(0.0, 0.0, 0.0, 0.0) :
+ (output_Stage0 * texture2D(uSampler0_Stage1,
+ vMatrixCoord_Stage1));
+ }
+ }
+ gl_FragColor = output_Stage1;
+}
diff --git a/src/cobalt/renderer/glimp_shaders/glsl/shaders.gypi b/src/cobalt/renderer/glimp_shaders/glsl/shaders.gypi
index 1997d06..d38588c 100644
--- a/src/cobalt/renderer/glimp_shaders/glsl/shaders.gypi
+++ b/src/cobalt/renderer/glimp_shaders/glsl/shaders.gypi
@@ -78,6 +78,7 @@
'fragment_skia_texture_domain.glsl',
'fragment_skia_texture_domain_masked_texture_domain.glsl',
'fragment_skia_texture_masked_texture.glsl',
+ 'fragment_skia_texture_masked_texture_domain.glsl',
'fragment_skia_yuv.glsl',
'fragment_textured_vbo_rgba.glsl',
'fragment_textured_vbo_uyvy_1plane.glsl',
diff --git a/src/cobalt/renderer/rasterizer/blitter/resource_provider.cc b/src/cobalt/renderer/rasterizer/blitter/resource_provider.cc
index 1da3c9e..e66f6e5 100644
--- a/src/cobalt/renderer/rasterizer/blitter/resource_provider.cc
+++ b/src/cobalt/renderer/rasterizer/blitter/resource_provider.cc
@@ -41,9 +41,7 @@
: device_(device),
skia_resource_provider_(skia_resource_provider),
submit_offscreen_callback_(submit_offscreen_callback) {
-#if SB_API_VERSION >= 4
decode_target_graphics_context_provider_.device = device;
-#endif // SB_API_VERSION >= 4
}
bool ResourceProvider::PixelFormatSupported(PixelFormat pixel_format) {
@@ -83,25 +81,10 @@
return make_scoped_refptr(new SinglePlaneImage(blitter_source_data.Pass()));
}
-#if SB_API_VERSION >= 3 && SB_HAS(GRAPHICS)
+#if SB_HAS(GRAPHICS)
scoped_refptr<render_tree::Image>
ResourceProvider::CreateImageFromSbDecodeTarget(SbDecodeTarget decode_target) {
-#if SB_API_VERSION < 4
- SbDecodeTargetFormat format = SbDecodeTargetGetFormat(decode_target);
- if (format == kSbDecodeTargetFormat1PlaneRGBA) {
- SbBlitterSurface surface =
- SbDecodeTargetGetPlane(decode_target, kSbDecodeTargetPlaneRGBA);
- DCHECK(SbBlitterIsSurfaceValid(surface));
- bool is_opaque = SbDecodeTargetIsOpaque(decode_target);
-
- // Now that we have the surface it contained, we are free to delete
- // |decode_target|.
- SbDecodeTargetDestroy(decode_target);
- return make_scoped_refptr(
- new SinglePlaneImage(surface, is_opaque, base::Closure()));
- }
-#else // SB_API_VERSION < 4
SbDecodeTargetInfo info;
SbMemorySet(&info, 0, sizeof(info));
CHECK(SbDecodeTargetGetInfo(decode_target, &info));
@@ -121,19 +104,14 @@
plane.surface, info.is_opaque,
base::Bind(&SbDecodeTargetRelease, decode_target)));
}
-#endif // SB_API_VERSION < 4
NOTREACHED()
<< "Only format kSbDecodeTargetFormat1PlaneRGBA is currently supported.";
-#if SB_API_VERSION < 4
- SbDecodeTargetDestroy(decode_target);
-#else // SB_API_VERSION < 4
SbDecodeTargetRelease(decode_target);
-#endif // SB_API_VERSION < 4
return NULL;
}
-#endif // SB_API_VERSION >= 3 && SB_HAS(GRAPHICS)
+#endif // SB_HAS(GRAPHICS)
scoped_ptr<render_tree::RawImageMemory>
ResourceProvider::AllocateRawImageMemory(size_t size_in_bytes,
diff --git a/src/cobalt/renderer/rasterizer/blitter/resource_provider.h b/src/cobalt/renderer/rasterizer/blitter/resource_provider.h
index 3f01c64..92315fe 100644
--- a/src/cobalt/renderer/rasterizer/blitter/resource_provider.h
+++ b/src/cobalt/renderer/rasterizer/blitter/resource_provider.h
@@ -43,21 +43,15 @@
void Finish() OVERRIDE {}
-#if SB_API_VERSION >= 3
scoped_refptr<render_tree::Image> CreateImageFromSbDecodeTarget(
SbDecodeTarget decode_target) OVERRIDE;
bool SupportsSbDecodeTarget() OVERRIDE { return true; }
-#endif // SB_API_VERSION >= 3
-#if SB_API_VERSION >= 4
SbDecodeTargetGraphicsContextProvider*
GetSbDecodeTargetGraphicsContextProvider() OVERRIDE {
return &decode_target_graphics_context_provider_;
}
-#elif SB_API_VERSION >= 3
- SbDecodeTargetProvider* GetSbDecodeTargetProvider() OVERRIDE { return NULL; }
-#endif // SB_API_VERSION >= 4
bool PixelFormatSupported(render_tree::PixelFormat pixel_format) OVERRIDE;
bool AlphaFormatSupported(render_tree::AlphaFormat alpha_format) OVERRIDE;
@@ -123,10 +117,8 @@
SubmitOffscreenCallback submit_offscreen_callback_;
-#if SB_API_VERSION >= 4
SbDecodeTargetGraphicsContextProvider
decode_target_graphics_context_provider_;
-#endif // SB_API_VERSION >= 4
};
} // namespace blitter
diff --git a/src/cobalt/renderer/rasterizer/egl/draw_object.cc b/src/cobalt/renderer/rasterizer/egl/draw_object.cc
index c2084cf..24ec5d0 100644
--- a/src/cobalt/renderer/rasterizer/egl/draw_object.cc
+++ b/src/cobalt/renderer/rasterizer/egl/draw_object.cc
@@ -17,6 +17,7 @@
#include <algorithm>
#include <limits>
+#include "cobalt/math/transform_2d.h"
#include "cobalt/renderer/backend/egl/utils.h"
namespace cobalt {
@@ -24,6 +25,34 @@
namespace rasterizer {
namespace egl {
+namespace {
+// To accommodate large radius values for rounded corners while using mediump
+// floats in the fragment shader, scale 1 / radius.xy by kRCornerGradientScale.
+// The fragment shader will take this into account.
+const float kRCornerGradientScale = 16.0f;
+
+// Get the midpoint of the given rounded rect. A normalized rounded rect
+// should have at least one point which the corners do not cross.
+math::PointF GetRRectCenter(const math::RectF& rect,
+ const render_tree::RoundedCorners& corners) {
+ return math::PointF(
+ 0.5f * (rect.x() + std::max(corners.top_left.horizontal,
+ corners.bottom_left.horizontal) +
+ rect.right() - std::max(corners.top_right.horizontal,
+ corners.bottom_right.horizontal)),
+ 0.5f * (rect.y() + std::max(corners.top_left.vertical,
+ corners.top_right.vertical) +
+ rect.bottom() - std::max(corners.bottom_left.vertical,
+ corners.bottom_right.vertical)));
+}
+
+math::PointF ClampToBounds(const math::RectF& bounds, float x, float y) {
+ return math::PointF(
+ std::min(std::max(bounds.x(), x), bounds.right()),
+ std::min(std::max(bounds.y(), y), bounds.bottom()));
+}
+} // namespace
+
DrawObject::BaseState::BaseState()
: transform(math::Matrix3F::Identity()),
scissor(0, 0,
@@ -38,9 +67,35 @@
rounded_scissor_corners(other.rounded_scissor_corners),
opacity(other.opacity) {}
+DrawObject::RCorner::RCorner(const float (&position)[2], const RCorner& init)
+ : x((position[0] - init.x) * init.rx),
+ y((position[1] - init.y) * init.ry),
+ rx(init.rx * kRCornerGradientScale),
+ ry(init.ry * kRCornerGradientScale) {}
+
DrawObject::DrawObject(const BaseState& base_state)
: base_state_(base_state) {}
+math::Vector2dF DrawObject::GetScale() const {
+ float m00 = base_state_.transform(0, 0);
+ float m01 = base_state_.transform(0, 1);
+ float m10 = base_state_.transform(1, 0);
+ float m11 = base_state_.transform(1, 1);
+ return math::Vector2dF(std::sqrt(m00 * m00 + m10 * m10),
+ std::sqrt(m01 * m01 + m11 * m11));
+}
+
+math::Vector2dF DrawObject::RemoveScaleFromTransform() {
+ // Avoid division by zero.
+ const float kEpsilon = 0.00001f;
+
+ math::Vector2dF scale = GetScale();
+ base_state_.transform = base_state_.transform *
+ math::ScaleMatrix(1.0f / std::max(scale.x(), kEpsilon),
+ 1.0f / std::max(scale.y(), kEpsilon));
+ return scale;
+}
+
// static
uint32_t DrawObject::GetGLRGBA(float r, float g, float b, float a) {
// Ensure color bytes represent RGBA, regardless of endianness.
@@ -56,74 +111,130 @@
}
// static
-void DrawObject::SetRRectUniforms(GLint rect_uniform, GLint corners_uniform,
- const math::RectF& rect, const render_tree::RoundedCorners& corners,
- float inset) {
- math::RectF inset_rect(rect);
- inset_rect.Inset(inset, inset);
- render_tree::RoundedCorners inset_corners =
- corners.Inset(inset, inset, inset, inset);
+void DrawObject::GetRRectAttributes(const math::RectF& bounds,
+ math::RectF rect, render_tree::RoundedCorners corners,
+ RRectAttributes (&out_attributes)[4]) {
+ GetRCornerValues(&rect, &corners, out_attributes);
- // Tweak corners that are square-ish so they have values that play
- // nicely with the shader. Interpolating x^2 / a^2 + y^2 / b^2 does not
- // work well when |a| or |b| are very small.
- if (inset_corners.top_left.horizontal <= 0.5f ||
- inset_corners.top_left.vertical <= 0.5f) {
- inset_corners.top_left.horizontal = 0.0f;
- inset_corners.top_left.vertical = 0.0f;
- }
- if (inset_corners.top_right.horizontal <= 0.5f ||
- inset_corners.top_right.vertical <= 0.5f) {
- inset_corners.top_right.horizontal = 0.0f;
- inset_corners.top_right.vertical = 0.0f;
- }
- if (inset_corners.bottom_left.horizontal <= 0.5f ||
- inset_corners.bottom_left.vertical <= 0.5f) {
- inset_corners.bottom_left.horizontal = 0.0f;
- inset_corners.bottom_left.vertical = 0.0f;
- }
- if (inset_corners.bottom_right.horizontal <= 0.5f ||
- inset_corners.bottom_right.vertical <= 0.5f) {
- inset_corners.bottom_right.horizontal = 0.0f;
- inset_corners.bottom_right.vertical = 0.0f;
- }
+ // Calculate the bounds for each patch. Four patches will be used to cover
+ // the entire bounded area.
+ math::PointF center = GetRRectCenter(rect, corners);
+ center = ClampToBounds(bounds, center.x(), center.y());
+ out_attributes[0].bounds.SetRect(bounds.x(), bounds.y(),
+ center.x() - bounds.x(), center.y() - bounds.y());
+ out_attributes[1].bounds.SetRect(center.x(), bounds.y(),
+ bounds.right() - center.x(), center.y() - bounds.y());
+ out_attributes[2].bounds.SetRect(bounds.x(), center.y(),
+ center.x() - bounds.x(), bounds.bottom() - center.y());
+ out_attributes[3].bounds.SetRect(center.x(), center.y(),
+ bounds.right() - center.x(), bounds.bottom() - center.y());
+}
+
+// static
+void DrawObject::GetRRectAttributes(const math::RectF& bounds,
+ math::RectF rect, render_tree::RoundedCorners corners,
+ RRectAttributes (&out_attributes)[8]) {
+ GetRCornerValues(&rect, &corners, out_attributes);
+ out_attributes[4].rcorner = out_attributes[0].rcorner;
+ out_attributes[5].rcorner = out_attributes[1].rcorner;
+ out_attributes[6].rcorner = out_attributes[2].rcorner;
+ out_attributes[7].rcorner = out_attributes[3].rcorner;
+
+ // Given an ellipse with radii A and B, the largest inscribed rectangle has
+ // dimensions sqrt(2) * A and sqrt(2) * B. To accommodate the antialiased
+ // edge, inset the inscribed rect by a pixel on each side.
+ const float kInsetScale = 0.2929f; // 1 - sqrt(2) / 2
+
+ // Calculate the bounds for each patch. Eight patches will be used to exclude
+ // the inscribed rect:
+ // +---+-----+-----+---+
+ // | | 4 | 5 | |
+ // | 0 +-----+-----+ 1 |
+ // | | | |
+ // +---+ C +---+ C = center point
+ // | | | |
+ // | 2 +-----+-----+ 3 |
+ // | | 6 | 7 | |
+ // +---+-----+-----+---+
+ math::PointF center = GetRRectCenter(rect, corners);
+ center = ClampToBounds(bounds, center.x(), center.y());
+ math::PointF inset0 = ClampToBounds(bounds,
+ rect.x() + kInsetScale * corners.top_left.horizontal + 1.0f,
+ rect.y() + kInsetScale * corners.top_left.vertical + 1.0f);
+ math::PointF inset1 = ClampToBounds(bounds,
+ rect.right() - kInsetScale * corners.top_right.horizontal - 1.0f,
+ rect.y() + kInsetScale * corners.top_right.vertical + 1.0f);
+ math::PointF inset2 = ClampToBounds(bounds,
+ rect.x() + kInsetScale * corners.bottom_left.horizontal + 1.0f,
+ rect.bottom() - kInsetScale * corners.bottom_left.vertical - 1.0f);
+ math::PointF inset3 = ClampToBounds(bounds,
+ rect.right() - kInsetScale * corners.bottom_right.horizontal - 1.0f,
+ rect.bottom() - kInsetScale * corners.bottom_right.vertical - 1.0f);
+
+ out_attributes[0].bounds.SetRect(bounds.x(), bounds.y(),
+ inset0.x() - bounds.x(), center.y() - bounds.y());
+ out_attributes[1].bounds.SetRect(inset1.x(), bounds.y(),
+ bounds.right() - inset1.x(), center.y() - bounds.y());
+ out_attributes[2].bounds.SetRect(bounds.x(), center.y(),
+ inset2.x() - bounds.x(), bounds.bottom() - center.y());
+ out_attributes[3].bounds.SetRect(inset3.x(), center.y(),
+ bounds.right() - inset3.x(), bounds.bottom() - center.y());
+ out_attributes[4].bounds.SetRect(inset0.x(), bounds.y(),
+ center.x() - inset0.x(), inset0.y() - bounds.y());
+ out_attributes[5].bounds.SetRect(center.x(), bounds.y(),
+ inset1.x() - center.x(), inset1.y() - bounds.y());
+ out_attributes[6].bounds.SetRect(inset2.x(), inset2.y(),
+ center.x() - inset2.x(), bounds.bottom() - inset2.y());
+ out_attributes[7].bounds.SetRect(center.x(), inset3.y(),
+ inset3.x() - center.x(), bounds.bottom() - inset3.y());
+}
+
+// static
+void DrawObject::GetRCornerValues(math::RectF* rect,
+ render_tree::RoundedCorners* corners, RRectAttributes out_rcorners[4]) {
// Ensure corner sizes are non-zero to allow generic handling of square and
- // rounded corners.
- const float kMinCornerSize = 0.01f;
- inset_rect.Outset(kMinCornerSize, kMinCornerSize);
- inset_corners = inset_corners.Inset(-kMinCornerSize, -kMinCornerSize,
- -kMinCornerSize, -kMinCornerSize);
- inset_corners = inset_corners.Normalize(inset_rect);
+ // rounded corners. Corner radii must be at least 1 pixel for antialiasing
+ // to work well.
+ const float kMinCornerSize = 1.0f;
- // The rect data is a vec4 representing (min.xy, max.xy).
- float rect_data[4] = {
- inset_rect.x(), inset_rect.y(), inset_rect.right(), inset_rect.bottom(),
- };
- GL_CALL(glUniform4fv(rect_uniform, 1, rect_data));
+ // First inset to make room for the minimum corner size. Then outset to
+ // enforce the minimum corner size. Be careful not to inset more than the
+ // rect size, otherwise the outset rect will be off-centered.
+ rect->Inset(std::min(rect->width() * 0.5f, kMinCornerSize),
+ std::min(rect->height() * 0.5f, kMinCornerSize));
+ *corners = corners->Inset(kMinCornerSize, kMinCornerSize, kMinCornerSize,
+ kMinCornerSize);
+ *corners = corners->Normalize(*rect);
+ rect->Outset(kMinCornerSize, kMinCornerSize);
+ *corners = corners->Inset(-kMinCornerSize, -kMinCornerSize, -kMinCornerSize,
+ -kMinCornerSize);
- // The corners data is a mat4 with each vector representing a corner
- // (ordered top left, top right, bottom left, bottom right). Each corner
- // vec4 represents (start.xy, radius.xy).
- float corners_data[16] = {
- inset_rect.x() + inset_corners.top_left.horizontal,
- inset_rect.y() + inset_corners.top_left.vertical,
- inset_corners.top_left.horizontal,
- inset_corners.top_left.vertical,
- inset_rect.right() - inset_corners.top_right.horizontal,
- inset_rect.y() + inset_corners.top_right.vertical,
- inset_corners.top_right.horizontal,
- inset_corners.top_right.vertical,
- inset_rect.x() + inset_corners.bottom_left.horizontal,
- inset_rect.bottom() - inset_corners.bottom_left.vertical,
- inset_corners.bottom_left.horizontal,
- inset_corners.bottom_left.vertical,
- inset_rect.right() - inset_corners.bottom_right.horizontal,
- inset_rect.bottom() - inset_corners.bottom_right.vertical,
- inset_corners.bottom_right.horizontal,
- inset_corners.bottom_right.vertical,
- };
- GL_CALL(glUniformMatrix4fv(corners_uniform, 1, false, corners_data));
+ // |rcorner| describes (start.xy, 1 / radius.xy) for the relevant corner.
+ // The sign of the radius component is used to facilitate the calculation:
+ // vec2 scaled_offset = (position - corner.xy) * corner.zw
+ // such that |scaled_offset| is in the first quadrant when the pixel is
+ // in the given rounded corner.
+ COMPILE_ASSERT(sizeof(RCorner) == sizeof(float) * 4, struct_should_be_vec4);
+ out_rcorners[0].rcorner.x = rect->x() + corners->top_left.horizontal;
+ out_rcorners[0].rcorner.y = rect->y() + corners->top_left.vertical;
+ out_rcorners[0].rcorner.rx = -1.0f / corners->top_left.horizontal;
+ out_rcorners[0].rcorner.ry = -1.0f / corners->top_left.vertical;
+
+ out_rcorners[1].rcorner.x = rect->right() - corners->top_right.horizontal;
+ out_rcorners[1].rcorner.y = rect->y() + corners->top_right.vertical;
+ out_rcorners[1].rcorner.rx = 1.0f / corners->top_right.horizontal;
+ out_rcorners[1].rcorner.ry = -1.0f / corners->top_right.vertical;
+
+ out_rcorners[2].rcorner.x = rect->x() + corners->bottom_left.horizontal;
+ out_rcorners[2].rcorner.y = rect->bottom() - corners->bottom_left.vertical;
+ out_rcorners[2].rcorner.rx = -1.0f / corners->bottom_left.horizontal;
+ out_rcorners[2].rcorner.ry = 1.0f / corners->bottom_left.vertical;
+
+ out_rcorners[3].rcorner.x = rect->right() - corners->bottom_right.horizontal;
+ out_rcorners[3].rcorner.y = rect->bottom() - corners->bottom_right.vertical;
+ out_rcorners[3].rcorner.rx = 1.0f / corners->bottom_right.horizontal;
+ out_rcorners[3].rcorner.ry = 1.0f / corners->bottom_right.vertical;
}
} // namespace egl
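DrawObject::GetScale() added above treats the upper-left 2x2 block of the draw transform as columns and returns each column's length, which recovers the x and y scale factors even in the presence of rotation; RemoveScaleFromTransform() then right-multiplies the transform by the inverse scale so later geometry can be generated in pixel units. Below is a small self-contained check of that column-length rule, using plain arrays instead of cobalt::math::Matrix3F (illustration only, not Cobalt code):

  #include <cmath>
  #include <cstdio>

  // Returns the x/y scale factors of a 2x2 linear transform
  //   | m00 m01 |
  //   | m10 m11 |
  // as the Euclidean length of each column, matching DrawObject::GetScale().
  void ExtractScale(const float m[2][2], float* sx, float* sy) {
    *sx = std::sqrt(m[0][0] * m[0][0] + m[1][0] * m[1][0]);
    *sy = std::sqrt(m[0][1] * m[0][1] + m[1][1] * m[1][1]);
  }

  int main() {
    // Scale by (3, 2) followed by a 90-degree rotation:
    //   R(90) * S(3, 2) = | 0 -2 |
    //                     | 3  0 |
    const float m[2][2] = {{0.0f, -2.0f}, {3.0f, 0.0f}};
    float sx = 0.0f, sy = 0.0f;
    ExtractScale(m, &sx, &sy);
    std::printf("sx=%g sy=%g\n", sx, sy);  // prints sx=3 sy=2
    return 0;
  }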
diff --git a/src/cobalt/renderer/rasterizer/egl/draw_object.h b/src/cobalt/renderer/rasterizer/egl/draw_object.h
index 1e2fb12..19af454 100644
--- a/src/cobalt/renderer/rasterizer/egl/draw_object.h
+++ b/src/cobalt/renderer/rasterizer/egl/draw_object.h
@@ -90,9 +90,32 @@
virtual base::TypeId GetTypeId() const = 0;
protected:
+ // Structures describing vertex data for rendering rounded rectangles.
+ struct RCorner {
+ // Constructor to transform an RCorner value into a format which the
+ // shader function IsOutsideRCorner() expects. This expresses the current
+ // vertex position as a scaled offset relevant for the corner, and provides
+ // scalars to assist in calculating the antialiased edge. For more details,
+ // see function_is_outside_rcorner.inc.
+ RCorner(const float (&position)[2], const RCorner& init);
+ RCorner() {}
+ float x, y;
+ float rx, ry;
+ };
+ struct RRectAttributes {
+ math::RectF bounds; // The region in which to use the rcorner data.
+ RCorner rcorner;
+ };
+
DrawObject() {}
explicit DrawObject(const BaseState& base_state);
+ // Extract the scale vector from this object's transform.
+ math::Vector2dF GetScale() const;
+
+ // Remove scale from the transform, and return the scale vector.
+ math::Vector2dF RemoveScaleFromTransform();
+
// Utility function to get the render color for the blend modes that will
// be used. These modes expect alpha to be pre-multiplied.
static render_tree::ColorRGBA GetDrawColor(
@@ -108,14 +131,28 @@
return GetGLRGBA(color.r(), color.g(), color.b(), color.a());
}
- // Set shader uniforms for a rounded rect. Specify a non-zero inset if
- // the rect will be used with anti-aliasing (e.g. 0.5 inset for a 1-pixel
- // anti-aliasing border).
- static void SetRRectUniforms(GLint rect_uniform, GLint corners_uniform,
- const math::RectF& rect, const render_tree::RoundedCorners& corners,
- float inset);
+ // Get the vertex attributes to use to draw the given rounded rect. Each
+ // corner uses a different attribute. These RCorner values must be transformed
+ // before being passed to the shader. (See RCorner constructor.)
+ static void GetRRectAttributes(const math::RectF& bounds,
+ math::RectF rect, render_tree::RoundedCorners corners,
+ RRectAttributes (&out_attributes)[4]);
+
+ // Get the vertex attributes to draw the given rounded rect excluding the
+ // inscribed rect. These RCorner values must be transformed before being
+ // passed to the shader. (See RCorner constructor.)
+ static void GetRRectAttributes(const math::RectF& bounds,
+ math::RectF rect, render_tree::RoundedCorners corners,
+ RRectAttributes (&out_attributes)[8]);
BaseState base_state_;
+
+ private:
+ // Return the RCorner values for the given rounded rect, and the normalized
+ // rect and corner values used.
+ static void GetRCornerValues(math::RectF* rect,
+ render_tree::RoundedCorners* corners,
+ RRectAttributes out_rcorners[4]);
};
} // namespace egl
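The 8-patch GetRRectAttributes() overload declared here excludes the rectangle inscribed in the rounded rect, and draw_object.cc sizes the remaining corner patches with kInsetScale = 0.2929f. The tiny program below is only a worked check of where that constant comes from (1 - sqrt(2)/2, the fraction of a corner radius left outside the ellipse's inscribed rectangle); it is not Cobalt code.

  #include <cmath>
  #include <cstdio>

  int main() {
    // The largest axis-aligned rectangle inscribed in an ellipse with radii
    // (a, b) has half-extents a / sqrt(2) and b / sqrt(2), so only the outer
    // 1 - 1/sqrt(2) fraction of each corner radius can contain the rounded
    // edge.  draw_object.cc rounds this to kInsetScale = 0.2929f and adds one
    // pixel on each side for the antialiased border.
    const double inset_scale = 1.0 - std::sqrt(2.0) / 2.0;
    std::printf("1 - sqrt(2)/2 = %.4f\n", inset_scale);  // prints 0.2929
    return 0;
  }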
diff --git a/src/cobalt/renderer/rasterizer/egl/draw_rect_border.cc b/src/cobalt/renderer/rasterizer/egl/draw_rect_border.cc
index cdc1d88..829ef32 100644
--- a/src/cobalt/renderer/rasterizer/egl/draw_rect_border.cc
+++ b/src/cobalt/renderer/rasterizer/egl/draw_rect_border.cc
@@ -121,26 +121,18 @@
const math::RectF& border_rect, const math::RectF& content_rect,
const render_tree::ColorRGBA& border_color,
const render_tree::ColorRGBA& content_color) {
- // Extract scale from the transform matrix. This can be reliably done if no
- // rotations are involved.
- const float kEpsilon = 0.0001f;
- if (std::abs(base_state_.transform(0, 1)) >= kEpsilon ||
- std::abs(base_state_.transform(1, 0)) >= kEpsilon) {
- return false;
- }
-
- // If the scale is 0 in either direction, then there's nothing to render.
- float scale_x = std::abs(base_state_.transform(0, 0));
- float scale_y = std::abs(base_state_.transform(1, 1));
- if (scale_x <= kEpsilon || scale_y <= kEpsilon) {
+ // If the scaled border rect is too small, then don't bother rendering.
+ math::Vector2dF scale = GetScale();
+ if (border_rect.width() * scale.x() < 1.0f ||
+ border_rect.height() * scale.y() < 1.0f) {
return true;
}
// Antialiased subpixel borders are not supported at this time. It can be
// done by attenuating the alpha, but this can get complicated if the borders
// are of different widths.
- float pixel_size_x = 1.0f / scale_x;
- float pixel_size_y = 1.0f / scale_y;
+ float pixel_size_x = 1.0f / scale.x();
+ float pixel_size_y = 1.0f / scale.y();
if (border.left.width < pixel_size_x || border.right.width < pixel_size_x ||
border.top.width < pixel_size_y || border.bottom.width < pixel_size_y) {
return false;
diff --git a/src/cobalt/renderer/rasterizer/egl/draw_rect_shadow_blur.cc b/src/cobalt/renderer/rasterizer/egl/draw_rect_shadow_blur.cc
index 13401c9..fcabd2d 100644
--- a/src/cobalt/renderer/rasterizer/egl/draw_rect_shadow_blur.cc
+++ b/src/cobalt/renderer/rasterizer/egl/draw_rect_shadow_blur.cc
@@ -80,128 +80,340 @@
}
} // namespace
+DrawRectShadowBlur::VertexAttributesSquare::VertexAttributesSquare(
+ float x, float y, float offset_scale) {
+ position[0] = x;
+ position[1] = y;
+ offset[0] = x * offset_scale;
+ offset[1] = y * offset_scale;
+}
+
+DrawRectShadowBlur::VertexAttributesRound::VertexAttributesRound(
+ float x, float y, const RCorner& init) {
+ position[0] = x;
+ position[1] = y;
+ rcorner_scissor = RCorner(position, init);
+}
+
DrawRectShadowBlur::DrawRectShadowBlur(GraphicsState* graphics_state,
const BaseState& base_state, const math::RectF& base_rect,
const OptionalRoundedCorners& base_corners, const math::RectF& spread_rect,
const OptionalRoundedCorners& spread_corners,
const render_tree::ColorRGBA& color, float blur_sigma, bool inset)
- : DrawRectShadowSpread(graphics_state, base_state),
+ : DrawObject(base_state),
spread_rect_(spread_rect),
spread_corners_(spread_corners),
blur_sigma_(blur_sigma),
- is_inset_(inset) {
- const float kBlurExtentInPixels = kBlurExtentInSigmas * blur_sigma;
+ is_inset_(inset),
+ vertex_buffer_(nullptr),
+ index_buffer_(nullptr) {
+ color_ = GetDrawColor(color) * base_state_.opacity;
- if (inset) {
- outer_rect_ = base_rect;
- outer_corners_ = base_corners;
- inner_rect_ = spread_rect;
- inner_rect_.Inset(kBlurExtentInPixels, kBlurExtentInPixels);
- if (inner_rect_.IsEmpty()) {
- inner_rect_.set_origin(spread_rect.CenterPoint());
- }
- if (spread_corners) {
- inner_corners_ = spread_corners->Inset(kBlurExtentInPixels,
- kBlurExtentInPixels, kBlurExtentInPixels, kBlurExtentInPixels);
- }
- } else {
- inner_rect_ = base_rect;
- inner_corners_ = base_corners;
- outer_rect_ = spread_rect;
- outer_rect_.Outset(kBlurExtentInPixels, kBlurExtentInPixels);
- if (spread_corners) {
- outer_corners_ = spread_corners->Inset(-kBlurExtentInPixels,
- -kBlurExtentInPixels, -kBlurExtentInPixels, -kBlurExtentInPixels);
- }
+ // Extract scale from the transform and move it into the vertex attributes
+ // so that the anti-aliased edges remain 1 pixel wide.
+ math::Vector2dF scale = RemoveScaleFromTransform();
+ math::RectF scaled_base_rect(base_rect);
+ scaled_base_rect.Scale(scale.x(), scale.y());
+ OptionalRoundedCorners scaled_base_corners(base_corners);
+ if (scaled_base_corners) {
+ scaled_base_corners = scaled_base_corners->Scale(scale.x(), scale.y());
+ scaled_base_corners = scaled_base_corners->Normalize(scaled_base_rect);
+ }
+ spread_rect_.Scale(scale.x(), scale.y());
+ if (spread_corners_) {
+ spread_corners_ = spread_corners_->Scale(scale.x(), scale.y());
+ spread_corners_ = spread_corners_->Normalize(spread_rect_);
}
- if (base_corners || spread_corners) {
- // If rounded rects are specified, then both the base and spread rects
- // must have rounded corners.
- DCHECK(inner_corners_);
- DCHECK(outer_corners_);
- DCHECK(spread_corners_);
- } else {
- // Non-rounded rects specify vertex offset in terms of sigma from the
- // center of the spread rect.
- offset_scale_ = kBlurDistance / kBlurExtentInPixels;
- offset_center_ = spread_rect_.CenterPoint();
- }
+ // The blur algorithms used by the shaders do not produce good results with
+ // separate x and y blur sigmas. Select a single blur sigma to approximate
+ // the desired blur.
+ blur_sigma_ *= std::sqrt(scale.x() * scale.y());
- color_ = GetGLRGBA(GetDrawColor(color) * base_state_.opacity);
+ SetGeometry(graphics_state, scaled_base_rect, scaled_base_corners);
+}
+
+void DrawRectShadowBlur::ExecuteUpdateVertexBuffer(
+ GraphicsState* graphics_state,
+ ShaderProgramManager* program_manager) {
+ if (attributes_square_.size() > 0) {
+ vertex_buffer_ = graphics_state->AllocateVertexData(
+ attributes_square_.size() * sizeof(attributes_square_[0]));
+ SbMemoryCopy(vertex_buffer_, &attributes_square_[0],
+ attributes_square_.size() * sizeof(attributes_square_[0]));
+ } else if (attributes_round_.size() > 0) {
+ vertex_buffer_ = graphics_state->AllocateVertexData(
+ attributes_round_.size() * sizeof(attributes_round_[0]));
+ SbMemoryCopy(vertex_buffer_, &attributes_round_[0],
+ attributes_round_.size() * sizeof(attributes_round_[0]));
+ index_buffer_ = graphics_state->AllocateVertexIndices(indices_.size());
+ SbMemoryCopy(index_buffer_, &indices_[0],
+ indices_.size() * sizeof(indices_[0]));
+ }
}
void DrawRectShadowBlur::ExecuteRasterize(
GraphicsState* graphics_state,
ShaderProgramManager* program_manager) {
+ if (vertex_buffer_ == nullptr) {
+ return;
+ }
+
// Draw the blurred shadow.
- if (inner_corners_) {
- ShaderProgram<CommonVertexShader,
+ if (spread_corners_) {
+ ShaderProgram<ShaderVertexOffsetRcorner,
ShaderFragmentColorBlurRrects>* program;
program_manager->GetProgram(&program);
graphics_state->UseProgram(program->GetHandle());
- SetupShader(program->GetVertexShader(), graphics_state);
-
+ SetupVertexShader(graphics_state, program->GetVertexShader());
+ SetFragmentUniforms(program->GetFragmentShader().u_color(),
+ program->GetFragmentShader().u_scale_add());
float sigma_scale = kBlurDistance / (kBlurExtentInSigmas * blur_sigma_);
GL_CALL(glUniform2f(program->GetFragmentShader().u_sigma_scale(),
sigma_scale, sigma_scale));
-
// Pre-calculate the scale values to calculate the normalized gaussian.
GL_CALL(glUniform2f(program->GetFragmentShader().u_gaussian_scale(),
-1.0f / (2.0f * blur_sigma_ * blur_sigma_),
1.0f / (kSqrt2 * kSqrtPi * blur_sigma_)));
- if (is_inset_) {
- // Set the outer rect to be an inclusive scissor, and invert the shadow.
- SetRRectUniforms(program->GetFragmentShader().u_scissor_rect(),
- program->GetFragmentShader().u_scissor_corners(),
- outer_rect_, *outer_corners_, 0.5f);
- GL_CALL(glUniform2f(program->GetFragmentShader().u_scale_add(),
- -1.0f, 1.0f));
- } else {
- // Set the inner rect to be an exclusive scissor.
- SetRRectUniforms(program->GetFragmentShader().u_scissor_rect(),
- program->GetFragmentShader().u_scissor_corners(),
- inner_rect_, *inner_corners_, 0.5f);
- GL_CALL(glUniform2f(program->GetFragmentShader().u_scale_add(),
- 1.0f, 0.0f));
- }
SetBlurRRectUniforms(program->GetFragmentShader(),
spread_rect_, *spread_corners_, blur_sigma_);
+ GL_CALL(glDrawElements(GL_TRIANGLES, indices_.size(), GL_UNSIGNED_SHORT,
+ graphics_state->GetVertexIndexPointer(index_buffer_)));
} else {
- ShaderProgram<CommonVertexShader,
+ ShaderProgram<ShaderVertexOffset,
ShaderFragmentColorBlur>* program;
program_manager->GetProgram(&program);
graphics_state->UseProgram(program->GetHandle());
- SetupShader(program->GetVertexShader(), graphics_state);
- if (is_inset_) {
- // Invert the shadow.
- GL_CALL(glUniform2f(program->GetFragmentShader().u_scale_add(),
- -1.0f, 1.0f));
- } else {
- // Keep the normal (outset) shadow.
- GL_CALL(glUniform2f(program->GetFragmentShader().u_scale_add(),
- 1.0f, 0.0f));
- }
+ SetupVertexShader(graphics_state, program->GetVertexShader());
+ SetFragmentUniforms(program->GetFragmentShader().u_color(),
+ program->GetFragmentShader().u_scale_add());
GL_CALL(glUniform4f(program->GetFragmentShader().u_blur_rect(),
- (spread_rect_.x() - offset_center_.x()) * offset_scale_,
- (spread_rect_.y() - offset_center_.y()) * offset_scale_,
- (spread_rect_.right() - offset_center_.x()) * offset_scale_,
- (spread_rect_.bottom() - offset_center_.y()) * offset_scale_));
+ spread_rect_.x(), spread_rect_.y(),
+ spread_rect_.right(), spread_rect_.bottom()));
+ GL_CALL(glDrawArrays(GL_TRIANGLE_STRIP, 0, attributes_square_.size()));
}
-
- GL_CALL(glDrawArrays(GL_TRIANGLE_STRIP, 0, vertex_count_));
}
base::TypeId DrawRectShadowBlur::GetTypeId() const {
- if (inner_corners_) {
- return ShaderProgram<CommonVertexShader,
+ if (spread_corners_) {
+ return ShaderProgram<ShaderVertexOffsetRcorner,
ShaderFragmentColorBlurRrects>::GetTypeId();
} else {
- return ShaderProgram<CommonVertexShader,
+ return ShaderProgram<ShaderVertexOffset,
ShaderFragmentColorBlur>::GetTypeId();
}
}
+void DrawRectShadowBlur::SetupVertexShader(GraphicsState* graphics_state,
+ const ShaderVertexOffset& shader) {
+ graphics_state->UpdateClipAdjustment(shader.u_clip_adjustment());
+ graphics_state->UpdateTransformMatrix(shader.u_view_matrix(),
+ base_state_.transform);
+ graphics_state->Scissor(base_state_.scissor.x(), base_state_.scissor.y(),
+ base_state_.scissor.width(), base_state_.scissor.height());
+ graphics_state->VertexAttribPointer(
+ shader.a_position(), 2, GL_FLOAT, GL_FALSE,
+ sizeof(VertexAttributesSquare), vertex_buffer_ +
+ offsetof(VertexAttributesSquare, position));
+ graphics_state->VertexAttribPointer(
+ shader.a_offset(), 2, GL_FLOAT, GL_FALSE,
+ sizeof(VertexAttributesSquare), vertex_buffer_ +
+ offsetof(VertexAttributesSquare, offset));
+ graphics_state->VertexAttribFinish();
+}
+
+void DrawRectShadowBlur::SetupVertexShader(GraphicsState* graphics_state,
+ const ShaderVertexOffsetRcorner& shader) {
+ graphics_state->UpdateClipAdjustment(shader.u_clip_adjustment());
+ graphics_state->UpdateTransformMatrix(shader.u_view_matrix(),
+ base_state_.transform);
+ graphics_state->Scissor(base_state_.scissor.x(), base_state_.scissor.y(),
+ base_state_.scissor.width(), base_state_.scissor.height());
+ graphics_state->VertexAttribPointer(
+ shader.a_position(), 2, GL_FLOAT, GL_FALSE,
+ sizeof(VertexAttributesRound), vertex_buffer_ +
+ offsetof(VertexAttributesRound, position));
+ graphics_state->VertexAttribPointer(
+ shader.a_rcorner(), 4, GL_FLOAT, GL_FALSE,
+ sizeof(VertexAttributesRound), vertex_buffer_ +
+ offsetof(VertexAttributesRound, rcorner_scissor));
+ graphics_state->VertexAttribFinish();
+}
+
+void DrawRectShadowBlur::SetFragmentUniforms(
+ GLint color_uniform, GLint scale_add_uniform) {
+ GL_CALL(glUniform4f(color_uniform,
+ color_.r(), color_.g(), color_.b(), color_.a()));
+ if (is_inset_) {
+ // Invert the shadow.
+ GL_CALL(glUniform2f(scale_add_uniform, -1.0f, 1.0f));
+ } else {
+ // Keep the normal (outset) shadow.
+ GL_CALL(glUniform2f(scale_add_uniform, 1.0f, 0.0f));
+ }
+}
+
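+// Build the blur geometry. Dispatches to the rounded-corner or square-corner
+// helpers depending on whether corners were specified and whether the shadow
+// is inset.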
+void DrawRectShadowBlur::SetGeometry(GraphicsState* graphics_state,
+ const math::RectF& base_rect, const OptionalRoundedCorners& base_corners) {
+ const float kBlurExtentInPixels = kBlurExtentInSigmas * blur_sigma_;
+
+ if (base_corners || spread_corners_) {
+ // If rounded rects are specified, then both the base and spread rects
+ // must have rounded corners.
+ DCHECK(base_corners);
+ DCHECK(spread_corners_);
+
+ if (is_inset_) {
+ // Extend the outer rect to include the antialiased edge.
+ math::RectF outer_rect(base_rect);
+ outer_rect.Outset(1.0f, 1.0f);
+ RRectAttributes rrect_outer[4];
+ GetRRectAttributes(outer_rect, base_rect, *base_corners, rrect_outer);
+ // Inset the spread rect by the blur extent. Use that as the inner bounds.
+ RRectAttributes rrect_inner[8];
+ math::RectF inner_rect(spread_rect_);
+ inner_rect.Inset(kBlurExtentInPixels, kBlurExtentInPixels);
+ if (!inner_rect.IsEmpty()) {
+ // Get the inner bounds excluding the inscribed rect.
+ render_tree::RoundedCorners inner_corners = spread_corners_->Inset(
+ kBlurExtentInPixels, kBlurExtentInPixels, kBlurExtentInPixels,
+ kBlurExtentInPixels);
+ inner_corners = inner_corners.Normalize(inner_rect);
+ GetRRectAttributes(outer_rect, inner_rect, inner_corners, rrect_inner);
+ } else {
+ // The blur covers everything inside the outer rect.
+ rrect_inner[0].bounds = outer_rect;
+ }
+ SetGeometry(graphics_state, rrect_outer, rrect_inner);
+ } else {
+ // Extend the outer rect to include the blur.
+ math::RectF outer_rect(spread_rect_);
+ outer_rect.Outset(kBlurExtentInPixels, kBlurExtentInPixels);
+ // Exclude the inscribed rect of the base rounded rect.
+ RRectAttributes rrect[8];
+ GetRRectAttributes(outer_rect, base_rect, *base_corners, rrect);
+ SetGeometry(graphics_state, rrect);
+ }
+ } else {
+ // Handle box shadow with square corners.
+ if (is_inset_) {
+ math::RectF inner_rect(spread_rect_);
+ inner_rect.Inset(kBlurExtentInPixels, kBlurExtentInPixels);
+ SetGeometry(graphics_state, inner_rect, base_rect);
+ } else {
+ math::RectF outer_rect(spread_rect_);
+ outer_rect.Outset(kBlurExtentInPixels, kBlurExtentInPixels);
+ SetGeometry(graphics_state, base_rect, outer_rect);
+ }
+ }
+}
+
+void DrawRectShadowBlur::SetGeometry(GraphicsState* graphics_state,
+ const math::RectF& inner_rect, const math::RectF& outer_rect) {
+ // Express offset in terms of blur sigma for the shader.
+ float offset_scale = kBlurDistance / (kBlurExtentInSigmas * blur_sigma_);
+
+ // The spread rect should also be expressed in terms of sigma.
+ spread_rect_.Scale(offset_scale, offset_scale);
+
+ // The box shadow is a triangle strip covering the area between outer rect
+ // and inner rect.
+ if (inner_rect.IsEmpty()) {
+ attributes_square_.reserve(4);
+ attributes_square_.emplace_back(
+ outer_rect.x(), outer_rect.y(), offset_scale);
+ attributes_square_.emplace_back(
+ outer_rect.right(), outer_rect.y(), offset_scale);
+ attributes_square_.emplace_back(
+ outer_rect.x(), outer_rect.bottom(), offset_scale);
+ attributes_square_.emplace_back(
+ outer_rect.right(), outer_rect.bottom(), offset_scale);
+ } else {
+ math::RectF inside_rect(inner_rect);
+ inside_rect.Intersect(outer_rect);
+ attributes_square_.reserve(10);
+ attributes_square_.emplace_back(
+ outer_rect.x(), outer_rect.y(), offset_scale);
+ attributes_square_.emplace_back(
+ inside_rect.x(), inside_rect.y(), offset_scale);
+ attributes_square_.emplace_back(
+ outer_rect.right(), outer_rect.y(), offset_scale);
+ attributes_square_.emplace_back(
+ inside_rect.right(), inside_rect.y(), offset_scale);
+ attributes_square_.emplace_back(
+ outer_rect.right(), outer_rect.bottom(), offset_scale);
+ attributes_square_.emplace_back(
+ inside_rect.right(), inside_rect.bottom(), offset_scale);
+ attributes_square_.emplace_back(
+ outer_rect.x(), outer_rect.bottom(), offset_scale);
+ attributes_square_.emplace_back(
+ inside_rect.x(), inside_rect.bottom(), offset_scale);
+ attributes_square_.emplace_back(
+ outer_rect.x(), outer_rect.y(), offset_scale);
+ attributes_square_.emplace_back(
+ inside_rect.x(), inside_rect.y(), offset_scale);
+ }
+
+ graphics_state->ReserveVertexData(
+ attributes_square_.size() * sizeof(attributes_square_[0]));
+}
+
+void DrawRectShadowBlur::SetGeometry(GraphicsState* graphics_state,
+ const RRectAttributes (&rrect)[8]) {
+ // The shadowed area is already split into quads.
+ for (int i = 0; i < arraysize(rrect); ++i) {
+ uint16_t vert = static_cast<uint16_t>(attributes_round_.size());
+ const math::RectF& bounds = rrect[i].bounds;
+ const RCorner& rcorner = rrect[i].rcorner;
+ attributes_round_.emplace_back(bounds.x(), bounds.y(), rcorner);
+ attributes_round_.emplace_back(bounds.right(), bounds.y(), rcorner);
+ attributes_round_.emplace_back(bounds.x(), bounds.bottom(), rcorner);
+ attributes_round_.emplace_back(bounds.right(), bounds.bottom(), rcorner);
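+ // Two triangles cover the quad, sharing the diagonal (vert + 1, vert + 2).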
+ indices_.emplace_back(vert);
+ indices_.emplace_back(vert + 1);
+ indices_.emplace_back(vert + 2);
+ indices_.emplace_back(vert + 1);
+ indices_.emplace_back(vert + 2);
+ indices_.emplace_back(vert + 3);
+ }
+
+ graphics_state->ReserveVertexData(
+ attributes_round_.size() * sizeof(attributes_round_[0]));
+ graphics_state->ReserveVertexIndices(indices_.size());
+}
+
+void DrawRectShadowBlur::SetGeometry(GraphicsState* graphics_state,
+ const RRectAttributes (&rrect_outer)[4],
+ const RRectAttributes (&rrect_inner)[8]) {
+ // Draw the area between the inner rect and outer rect using the outer rect's
+ // rounded corners. The inner quads already exclude the inscribed rectangle.
+ for (int i = 0; i < arraysize(rrect_inner); ++i) {
+ for (int o = 0; o < arraysize(rrect_outer); ++o) {
+ math::RectF rect = math::IntersectRects(
+ rrect_inner[i].bounds, rrect_outer[o].bounds);
+ if (!rect.IsEmpty()) {
+ // Use two triangles to draw the intersection.
+ const RCorner& rcorner = rrect_outer[o].rcorner;
+ uint16_t vert = static_cast<uint16_t>(attributes_round_.size());
+ attributes_round_.emplace_back(rect.x(), rect.y(), rcorner);
+ attributes_round_.emplace_back(rect.right(), rect.y(), rcorner);
+ attributes_round_.emplace_back(rect.x(), rect.bottom(), rcorner);
+ attributes_round_.emplace_back(rect.right(), rect.bottom(), rcorner);
+ indices_.emplace_back(vert);
+ indices_.emplace_back(vert + 1);
+ indices_.emplace_back(vert + 2);
+ indices_.emplace_back(vert + 1);
+ indices_.emplace_back(vert + 2);
+ indices_.emplace_back(vert + 3);
+ }
+ }
+ }
+
+ graphics_state->ReserveVertexData(
+ attributes_round_.size() * sizeof(attributes_round_[0]));
+ graphics_state->ReserveVertexIndices(indices_.size());
+}
+
} // namespace egl
} // namespace rasterizer
} // namespace renderer
diff --git a/src/cobalt/renderer/rasterizer/egl/draw_rect_shadow_blur.h b/src/cobalt/renderer/rasterizer/egl/draw_rect_shadow_blur.h
index 8d36722..107af85 100644
--- a/src/cobalt/renderer/rasterizer/egl/draw_rect_shadow_blur.h
+++ b/src/cobalt/renderer/rasterizer/egl/draw_rect_shadow_blur.h
@@ -15,7 +15,12 @@
#ifndef COBALT_RENDERER_RASTERIZER_EGL_DRAW_RECT_SHADOW_BLUR_H_
#define COBALT_RENDERER_RASTERIZER_EGL_DRAW_RECT_SHADOW_BLUR_H_
-#include "cobalt/renderer/rasterizer/egl/draw_rect_shadow_spread.h"
+#include <vector>
+
+#include "cobalt/math/rect_f.h"
+#include "cobalt/render_tree/color_rgba.h"
+#include "cobalt/renderer/rasterizer/egl/draw_object.h"
+#include "egl/generated_shader_impl.h"
namespace cobalt {
namespace renderer {
@@ -40,12 +45,7 @@
// Handles drawing a box shadow with blur. This uses a gaussian kernel to fade
// the "blur" region.
-//
-// This uses a shader to mimic skia's SkBlurMask.cpp.
-// See also http://stereopsis.com/shadowrect/ as reference for the formula
-// used to approximate the gaussian integral (which controls the opacity of
-// the shadow).
-class DrawRectShadowBlur : public DrawRectShadowSpread {
+class DrawRectShadowBlur : public DrawObject {
public:
// Draw a blurred box shadow.
// The box shadow exists in the area between |base_rect| and |spread_rect|
@@ -59,16 +59,55 @@
const render_tree::ColorRGBA& color,
float blur_sigma, bool inset);
+ void ExecuteUpdateVertexBuffer(GraphicsState* graphics_state,
+ ShaderProgramManager* program_manager) OVERRIDE;
void ExecuteRasterize(GraphicsState* graphics_state,
ShaderProgramManager* program_manager) OVERRIDE;
base::TypeId GetTypeId() const OVERRIDE;
private:
+ struct VertexAttributesSquare {
+ VertexAttributesSquare(float x, float y, float offset_scale);
+ float position[2];
+ float offset[2];
+ };
+
+ struct VertexAttributesRound {
+ VertexAttributesRound(float x, float y, const RCorner& init);
+ float position[2];
+ RCorner rcorner_scissor;
+ };
+
+ void SetupVertexShader(GraphicsState* graphics_state,
+ const ShaderVertexOffset& shader);
+ void SetupVertexShader(GraphicsState* graphics_state,
+ const ShaderVertexOffsetRcorner& shader);
+ void SetFragmentUniforms(GLint color_uniform, GLint scale_add_uniform);
+
+ void SetGeometry(GraphicsState* graphics_state,
+ const math::RectF& base_rect,
+ const OptionalRoundedCorners& base_corners);
+ void SetGeometry(GraphicsState* graphics_state,
+ const math::RectF& inner_rect,
+ const math::RectF& outer_rect);
+ void SetGeometry(GraphicsState* graphics_state,
+ const RRectAttributes (&rrect)[8]);
+ void SetGeometry(GraphicsState* graphics_state,
+ const RRectAttributes (&rrect_outer)[4],
+ const RRectAttributes (&rrect_inner)[8]);
+
math::RectF spread_rect_;
OptionalRoundedCorners spread_corners_;
-
+ render_tree::ColorRGBA color_;
float blur_sigma_;
bool is_inset_;
+
+ std::vector<VertexAttributesSquare> attributes_square_;
+ std::vector<VertexAttributesRound> attributes_round_;
+ std::vector<uint16_t> indices_;
+
+ uint8_t* vertex_buffer_;
+ uint16_t* index_buffer_;
};
} // namespace egl
diff --git a/src/cobalt/renderer/rasterizer/egl/draw_rect_shadow_spread.cc b/src/cobalt/renderer/rasterizer/egl/draw_rect_shadow_spread.cc
index 9dba0d3..faa7286 100644
--- a/src/cobalt/renderer/rasterizer/egl/draw_rect_shadow_spread.cc
+++ b/src/cobalt/renderer/rasterizer/egl/draw_rect_shadow_spread.cc
@@ -26,8 +26,19 @@
namespace rasterizer {
namespace egl {
-namespace {
-const int kVertexCount = 10;
+DrawRectShadowSpread::VertexAttributesSquare::VertexAttributesSquare(
+ float x, float y, uint32_t in_color) {
+ position[0] = x;
+ position[1] = y;
+ color = in_color;
+}
+
+DrawRectShadowSpread::VertexAttributesRound::VertexAttributesRound(
+ float x, float y, const RCorner& inner, const RCorner& outer) {
+ position[0] = x;
+ position[1] = y;
+ rcorner_inner = RCorner(position, inner);
+ rcorner_outer = RCorner(position, outer);
}
DrawRectShadowSpread::DrawRectShadowSpread(GraphicsState* graphics_state,
@@ -36,121 +47,94 @@
const OptionalRoundedCorners& outer_corners,
const render_tree::ColorRGBA& color)
: DrawObject(base_state),
- inner_rect_(inner_rect),
- outer_rect_(outer_rect),
- inner_corners_(inner_corners),
- outer_corners_(outer_corners),
- offset_scale_(1.0f),
vertex_buffer_(nullptr),
- vertex_count_(0) {
- color_ = GetGLRGBA(GetDrawColor(color) * base_state_.opacity);
- if (inner_corners_ || outer_corners_) {
+ index_buffer_(nullptr) {
+ color_ = GetDrawColor(color) * base_state_.opacity;
+
+ // Extract scale from the transform and move it into the vertex attributes
+ // so that the anti-aliased edges remain 1 pixel wide.
+ math::Vector2dF scale = RemoveScaleFromTransform();
+ math::RectF inside_rect(inner_rect);
+ math::RectF outside_rect(outer_rect);
+ inside_rect.Scale(scale.x(), scale.y());
+ outside_rect.Scale(scale.x(), scale.y());
+
+ if (inner_corners || outer_corners) {
// If using rounded corners, then both inner and outer rects must have
// rounded corner definitions.
- DCHECK(inner_corners_);
- DCHECK(outer_corners_);
+ DCHECK(inner_corners);
+ DCHECK(outer_corners);
+ render_tree::RoundedCorners inside_corners =
+ inner_corners->Scale(scale.x(), scale.y());
+ render_tree::RoundedCorners outside_corners =
+ outer_corners->Scale(scale.x(), scale.y());
+ SetGeometry(graphics_state,
+ inside_rect, inside_corners,
+ outside_rect, outside_corners);
+ } else {
+ SetGeometry(graphics_state, inside_rect, outside_rect);
}
- graphics_state->ReserveVertexData(kVertexCount * sizeof(VertexAttributes));
-}
-
-DrawRectShadowSpread::DrawRectShadowSpread(GraphicsState* graphics_state,
- const BaseState& base_state)
- : DrawObject(base_state),
- offset_scale_(1.0f),
- vertex_buffer_(nullptr),
- vertex_count_(0) {
- graphics_state->ReserveVertexData(kVertexCount * sizeof(VertexAttributes));
}
void DrawRectShadowSpread::ExecuteUpdateVertexBuffer(
GraphicsState* graphics_state,
ShaderProgramManager* program_manager) {
- // Draw the box shadow's spread. This is a triangle strip covering the area
- // between outer rect and inner rect.
- math::RectF inside_rect(inner_rect_);
- math::RectF outside_rect(outer_rect_);
- VertexAttributes attributes[kVertexCount];
-
- if (inner_corners_) {
- // Inset the inside rect to include the rounded corners.
- inside_rect.Inset(
- std::max(inner_corners_->bottom_left.horizontal,
- inner_corners_->top_left.horizontal),
- std::max(inner_corners_->top_left.vertical,
- inner_corners_->top_right.vertical),
- std::max(inner_corners_->top_right.horizontal,
- inner_corners_->bottom_right.horizontal),
- std::max(inner_corners_->bottom_right.vertical,
- inner_corners_->bottom_left.vertical));
-
- // Add a 1 pixel border to the outer rect for anti-aliasing.
- outside_rect.Outset(1.0f, 1.0f);
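+ // Upload whichever vertex set SetGeometry produced. The rounded-corner path
+ // also uploads an index buffer for its triangle list.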
+ if (attributes_square_.size() > 0) {
+ vertex_buffer_ = graphics_state->AllocateVertexData(
+ attributes_square_.size() * sizeof(attributes_square_[0]));
+ SbMemoryCopy(vertex_buffer_, &attributes_square_[0],
+ attributes_square_.size() * sizeof(attributes_square_[0]));
+ } else if (attributes_round_.size() > 0) {
+ vertex_buffer_ = graphics_state->AllocateVertexData(
+ attributes_round_.size() * sizeof(attributes_round_[0]));
+ SbMemoryCopy(vertex_buffer_, &attributes_round_[0],
+ attributes_round_.size() * sizeof(attributes_round_[0]));
+ index_buffer_ = graphics_state->AllocateVertexIndices(indices_.size());
+ SbMemoryCopy(index_buffer_, &indices_[0],
+ indices_.size() * sizeof(indices_[0]));
}
-
- // Only pixels inside the outer rect should be touched.
- if (inside_rect.IsEmpty()) {
- vertex_count_ = 4;
- SetVertex(&attributes[0], outside_rect.x(), outside_rect.y());
- SetVertex(&attributes[1], outside_rect.right(), outside_rect.y());
- SetVertex(&attributes[2], outside_rect.x(), outside_rect.bottom());
- SetVertex(&attributes[3], outside_rect.right(), outside_rect.bottom());
- } else {
- inside_rect.Intersect(outside_rect);
- vertex_count_ = 10;
- SetVertex(&attributes[0], outside_rect.x(), outside_rect.y());
- SetVertex(&attributes[1], inside_rect.x(), inside_rect.y());
- SetVertex(&attributes[2], outside_rect.right(), outside_rect.y());
- SetVertex(&attributes[3], inside_rect.right(), inside_rect.y());
- SetVertex(&attributes[4], outside_rect.right(), outside_rect.bottom());
- SetVertex(&attributes[5], inside_rect.right(), inside_rect.bottom());
- SetVertex(&attributes[6], outside_rect.x(), outside_rect.bottom());
- SetVertex(&attributes[7], inside_rect.x(), inside_rect.bottom());
- SetVertex(&attributes[8], outside_rect.x(), outside_rect.y());
- SetVertex(&attributes[9], inside_rect.x(), inside_rect.y());
- }
-
- vertex_buffer_ = graphics_state->AllocateVertexData(
- vertex_count_ * sizeof(VertexAttributes));
- SbMemoryCopy(vertex_buffer_, attributes,
- vertex_count_ * sizeof(VertexAttributes));
}
void DrawRectShadowSpread::ExecuteRasterize(
GraphicsState* graphics_state,
ShaderProgramManager* program_manager) {
- if (inner_corners_) {
- ShaderProgram<CommonVertexShader,
- ShaderFragmentColorBetweenRrects>* program;
- program_manager->GetProgram(&program);
- graphics_state->UseProgram(program->GetHandle());
- SetupShader(program->GetVertexShader(), graphics_state);
-
- SetRRectUniforms(program->GetFragmentShader().u_inner_rect(),
- program->GetFragmentShader().u_inner_corners(),
- inner_rect_, *inner_corners_, 0.5f);
- SetRRectUniforms(program->GetFragmentShader().u_outer_rect(),
- program->GetFragmentShader().u_outer_corners(),
- outer_rect_, *outer_corners_, 0.5f);
- } else {
- ShaderProgram<CommonVertexShader, ShaderFragmentColorInclude>* program;
- program_manager->GetProgram(&program);
- graphics_state->UseProgram(program->GetHandle());
- SetupShader(program->GetVertexShader(), graphics_state);
-
- float include[4] = {
- outer_rect_.x(),
- outer_rect_.y(),
- outer_rect_.right(),
- outer_rect_.bottom()
- };
- GL_CALL(glUniform4fv(program->GetFragmentShader().u_include(), 1, include));
+ if (vertex_buffer_ == nullptr) {
+ return;
}
- GL_CALL(glDrawArrays(GL_TRIANGLE_STRIP, 0, vertex_count_));
+ // Draw the box shadow.
+ if (attributes_square_.size() > 0) {
+ ShaderProgram<ShaderVertexColor,
+ ShaderFragmentColor>* program;
+ program_manager->GetProgram(&program);
+ graphics_state->UseProgram(program->GetHandle());
+ SetupVertexShader(graphics_state, program->GetVertexShader());
+ GL_CALL(glDrawArrays(GL_TRIANGLE_STRIP, 0, attributes_square_.size()));
+ } else {
+ ShaderProgram<ShaderVertexRcorner2,
+ ShaderFragmentRcorner2Color>* program;
+ program_manager->GetProgram(&program);
+ graphics_state->UseProgram(program->GetHandle());
+ SetupVertexShader(graphics_state, program->GetVertexShader());
+ GL_CALL(glUniform4f(program->GetFragmentShader().u_color(),
+ color_.r(), color_.g(), color_.b(), color_.a()));
+ GL_CALL(glDrawElements(GL_TRIANGLES, indices_.size(), GL_UNSIGNED_SHORT,
+ graphics_state->GetVertexIndexPointer(index_buffer_)));
+ }
}
-void DrawRectShadowSpread::SetupShader(const CommonVertexShader& shader,
- GraphicsState* graphics_state) {
+base::TypeId DrawRectShadowSpread::GetTypeId() const {
+ if (attributes_square_.size() > 0) {
+ return ShaderProgram<ShaderVertexColor,
+ ShaderFragmentColor>::GetTypeId();
+ } else {
+ return ShaderProgram<ShaderVertexRcorner2,
+ ShaderFragmentRcorner2Color>::GetTypeId();
+ }
+}
+
+void DrawRectShadowSpread::SetupVertexShader(GraphicsState* graphics_state,
+ const ShaderVertexColor& shader) {
graphics_state->UpdateClipAdjustment(shader.u_clip_adjustment());
graphics_state->UpdateTransformMatrix(shader.u_view_matrix(),
base_state_.transform);
@@ -158,36 +142,134 @@
base_state_.scissor.width(), base_state_.scissor.height());
graphics_state->VertexAttribPointer(
shader.a_position(), 2, GL_FLOAT, GL_FALSE,
- sizeof(VertexAttributes), vertex_buffer_ +
- offsetof(VertexAttributes, position));
+ sizeof(VertexAttributesSquare), vertex_buffer_ +
+ offsetof(VertexAttributesSquare, position));
graphics_state->VertexAttribPointer(
shader.a_color(), 4, GL_UNSIGNED_BYTE, GL_TRUE,
- sizeof(VertexAttributes), vertex_buffer_ +
- offsetof(VertexAttributes, color));
- graphics_state->VertexAttribPointer(
- shader.a_offset(), 2, GL_FLOAT, GL_FALSE,
- sizeof(VertexAttributes), vertex_buffer_ +
- offsetof(VertexAttributes, offset));
+ sizeof(VertexAttributesSquare), vertex_buffer_ +
+ offsetof(VertexAttributesSquare, color));
graphics_state->VertexAttribFinish();
}
-base::TypeId DrawRectShadowSpread::GetTypeId() const {
- if (inner_corners_) {
- return ShaderProgram<CommonVertexShader,
- ShaderFragmentColorBetweenRrects>::GetTypeId();
- } else {
- return ShaderProgram<CommonVertexShader,
- ShaderFragmentColorInclude>::GetTypeId();
- }
+void DrawRectShadowSpread::SetupVertexShader(GraphicsState* graphics_state,
+ const ShaderVertexRcorner2& shader) {
+ graphics_state->UpdateClipAdjustment(shader.u_clip_adjustment());
+ graphics_state->UpdateTransformMatrix(shader.u_view_matrix(),
+ base_state_.transform);
+ graphics_state->Scissor(base_state_.scissor.x(), base_state_.scissor.y(),
+ base_state_.scissor.width(), base_state_.scissor.height());
+ graphics_state->VertexAttribPointer(
+ shader.a_position(), 2, GL_FLOAT, GL_FALSE,
+ sizeof(VertexAttributesRound), vertex_buffer_ +
+ offsetof(VertexAttributesRound, position));
+ graphics_state->VertexAttribPointer(
+ shader.a_rcorner_inner(), 4, GL_FLOAT, GL_FALSE,
+ sizeof(VertexAttributesRound), vertex_buffer_ +
+ offsetof(VertexAttributesRound, rcorner_inner));
+ graphics_state->VertexAttribPointer(
+ shader.a_rcorner_outer(), 4, GL_FLOAT, GL_FALSE,
+ sizeof(VertexAttributesRound), vertex_buffer_ +
+ offsetof(VertexAttributesRound, rcorner_outer));
+ graphics_state->VertexAttribFinish();
}
-void DrawRectShadowSpread::SetVertex(VertexAttributes* vertex,
- float x, float y) {
- vertex->position[0] = x;
- vertex->position[1] = y;
- vertex->offset[0] = (x - offset_center_.x()) * offset_scale_;
- vertex->offset[1] = (y - offset_center_.y()) * offset_scale_;
- vertex->color = color_;
+void DrawRectShadowSpread::SetGeometry(GraphicsState* graphics_state,
+ const math::RectF& inner_rect, const math::RectF& outer_rect) {
+ // Draw the box shadow's spread. This is a triangle strip covering the area
+ // between outer rect and inner rect.
+ uint32_t color = GetGLRGBA(color_);
+
+ if (inner_rect.IsEmpty()) {
+ attributes_square_.reserve(4);
+ attributes_square_.emplace_back(
+ outer_rect.x(), outer_rect.y(), color);
+ attributes_square_.emplace_back(
+ outer_rect.right(), outer_rect.y(), color);
+ attributes_square_.emplace_back(
+ outer_rect.x(), outer_rect.bottom(), color);
+ attributes_square_.emplace_back(
+ outer_rect.right(), outer_rect.bottom(), color);
+ } else {
+ math::RectF inside_rect(inner_rect);
+ inside_rect.Intersect(outer_rect);
+ attributes_square_.reserve(10);
+ attributes_square_.emplace_back(
+ outer_rect.x(), outer_rect.y(), color);
+ attributes_square_.emplace_back(
+ inside_rect.x(), inside_rect.y(), color);
+ attributes_square_.emplace_back(
+ outer_rect.right(), outer_rect.y(), color);
+ attributes_square_.emplace_back(
+ inside_rect.right(), inside_rect.y(), color);
+ attributes_square_.emplace_back(
+ outer_rect.right(), outer_rect.bottom(), color);
+ attributes_square_.emplace_back(
+ inside_rect.right(), inside_rect.bottom(), color);
+ attributes_square_.emplace_back(
+ outer_rect.x(), outer_rect.bottom(), color);
+ attributes_square_.emplace_back(
+ inside_rect.x(), inside_rect.bottom(), color);
+ attributes_square_.emplace_back(
+ outer_rect.x(), outer_rect.y(), color);
+ attributes_square_.emplace_back(
+ inside_rect.x(), inside_rect.y(), color);
+ }
+
+ graphics_state->ReserveVertexData(
+ attributes_square_.size() * sizeof(attributes_square_[0]));
+}
+
+void DrawRectShadowSpread::SetGeometry(
+ GraphicsState* graphics_state,
+ const math::RectF& inner_rect,
+ const render_tree::RoundedCorners& inner_corners,
+ const math::RectF& outer_rect,
+ const render_tree::RoundedCorners& outer_corners) {
+ // Draw the area between the inner rounded rect and outer rounded rect. Add
+ // a 1-pixel border to include anti-aliasing.
+ math::RectF bounds(outer_rect);
+ bounds.Outset(1.0f, 1.0f);
+
+ // Get the render quads for the inner rounded rect excluding its inscribed
+ // rectangle.
+ RRectAttributes rrect_inner[8];
+ GetRRectAttributes(bounds, inner_rect, inner_corners, rrect_inner);
+
+ // Get the render quads for the outer rounded rect.
+ RRectAttributes rrect_outer[4];
+ GetRRectAttributes(bounds, outer_rect, outer_corners, rrect_outer);
+
+ // Add geometry to draw the area between the inner rrect and outer rrect.
+ for (int i = 0; i < arraysize(rrect_inner); ++i) {
+ for (int o = 0; o < arraysize(rrect_outer); ++o) {
+ math::RectF intersection = math::IntersectRects(
+ rrect_inner[i].bounds, rrect_outer[o].bounds);
+ if (!intersection.IsEmpty()) {
+ // Use two triangles to draw the intersection.
+ const RCorner& inner = rrect_inner[i].rcorner;
+ const RCorner& outer = rrect_outer[o].rcorner;
+ uint16_t vert = static_cast<uint16_t>(attributes_round_.size());
+ attributes_round_.emplace_back(
+ intersection.x(), intersection.y(), inner, outer);
+ attributes_round_.emplace_back(
+ intersection.right(), intersection.y(), inner, outer);
+ attributes_round_.emplace_back(
+ intersection.x(), intersection.bottom(), inner, outer);
+ attributes_round_.emplace_back(
+ intersection.right(), intersection.bottom(), inner, outer);
+ indices_.emplace_back(vert);
+ indices_.emplace_back(vert + 1);
+ indices_.emplace_back(vert + 2);
+ indices_.emplace_back(vert + 1);
+ indices_.emplace_back(vert + 2);
+ indices_.emplace_back(vert + 3);
+ }
+ }
+ }
+
+ graphics_state->ReserveVertexData(
+ attributes_round_.size() * sizeof(attributes_round_[0]));
+ graphics_state->ReserveVertexIndices(indices_.size());
}
} // namespace egl
diff --git a/src/cobalt/renderer/rasterizer/egl/draw_rect_shadow_spread.h b/src/cobalt/renderer/rasterizer/egl/draw_rect_shadow_spread.h
index 1e4d590..6df79ca 100644
--- a/src/cobalt/renderer/rasterizer/egl/draw_rect_shadow_spread.h
+++ b/src/cobalt/renderer/rasterizer/egl/draw_rect_shadow_spread.h
@@ -15,6 +15,8 @@
#ifndef COBALT_RENDERER_RASTERIZER_EGL_DRAW_RECT_SHADOW_SPREAD_H_
#define COBALT_RENDERER_RASTERIZER_EGL_DRAW_RECT_SHADOW_SPREAD_H_
+#include <vector>
+
#include "cobalt/math/rect_f.h"
#include "cobalt/render_tree/color_rgba.h"
#include "cobalt/renderer/rasterizer/egl/draw_object.h"
@@ -58,31 +60,43 @@
ShaderProgramManager* program_manager) OVERRIDE;
base::TypeId GetTypeId() const OVERRIDE;
- protected:
- typedef ShaderVertexColorOffset CommonVertexShader;
-
- struct VertexAttributes {
+ private:
+ struct VertexAttributesSquare {
+ VertexAttributesSquare(float x, float y, uint32_t color);
float position[2];
- float offset[2];
uint32_t color;
};
- DrawRectShadowSpread(GraphicsState* graphics_state,
- const BaseState& base_state);
- void SetupShader(const CommonVertexShader& shader,
- GraphicsState* graphics_state);
- void SetVertex(VertexAttributes* vertex, float x, float y);
+ struct VertexAttributesRound {
+ VertexAttributesRound(float x, float y,
+ const RCorner& inner, const RCorner& outer);
+ float position[2];
+ RCorner rcorner_inner;
+ RCorner rcorner_outer;
+ };
- math::RectF inner_rect_;
- math::RectF outer_rect_;
- OptionalRoundedCorners inner_corners_;
- OptionalRoundedCorners outer_corners_;
- math::PointF offset_center_;
- float offset_scale_;
- uint32_t color_;
+ void SetupVertexShader(GraphicsState* graphics_state,
+ const ShaderVertexColor& shader);
+ void SetupVertexShader(GraphicsState* graphics_state,
+ const ShaderVertexRcorner2& shader);
+
+ void SetGeometry(GraphicsState* graphics_state,
+ const math::RectF& inner_rect,
+ const math::RectF& outer_rect);
+ void SetGeometry(GraphicsState* graphics_state,
+ const math::RectF& inner_rect,
+ const render_tree::RoundedCorners& inner_corners,
+ const math::RectF& outer_rect,
+ const render_tree::RoundedCorners& outer_corners);
+
+ render_tree::ColorRGBA color_;
+
+ std::vector<VertexAttributesSquare> attributes_square_;
+ std::vector<VertexAttributesRound> attributes_round_;
+ std::vector<uint16_t> indices_;
uint8_t* vertex_buffer_;
- int vertex_count_;
+ uint16_t* index_buffer_;
};
} // namespace egl
diff --git a/src/cobalt/renderer/rasterizer/egl/draw_rrect_color.cc b/src/cobalt/renderer/rasterizer/egl/draw_rrect_color.cc
index f12e7ed..2443bbe 100644
--- a/src/cobalt/renderer/rasterizer/egl/draw_rrect_color.cc
+++ b/src/cobalt/renderer/rasterizer/egl/draw_rrect_color.cc
@@ -26,21 +26,7 @@
namespace egl {
namespace {
-const int kVertexCount = 4;
-
-struct VertexAttributes {
- float position[2];
- float offset[2];
- uint32_t color;
-};
-
-void SetVertex(VertexAttributes* vertex, float x, float y, uint32_t color) {
- vertex->position[0] = x;
- vertex->position[1] = y;
- vertex->offset[0] = x;
- vertex->offset[1] = y;
- vertex->color = color;
-}
+const int kVertexCount = 4 * 6;
} // namespace
DrawRRectColor::DrawRRectColor(GraphicsState* graphics_state,
@@ -51,8 +37,14 @@
rect_(rect),
corners_(corners),
vertex_buffer_(NULL) {
- color_ = GetGLRGBA(GetDrawColor(color) * base_state_.opacity);
+ color_ = GetDrawColor(color) * base_state_.opacity;
graphics_state->ReserveVertexData(kVertexCount * sizeof(VertexAttributes));
+
+ // Extract scale from the transform and move it into the vertex attributes
+ // so that the anti-aliased edges remain 1 pixel wide.
+ math::Vector2dF scale = RemoveScaleFromTransform();
+ rect_.Scale(scale.x(), scale.y());
+ corners_ = corners_.Scale(scale.x(), scale.y());
}
void DrawRRectColor::ExecuteUpdateVertexBuffer(
@@ -63,10 +55,31 @@
VertexAttributes attributes[kVertexCount];
math::RectF outer_rect(rect_);
outer_rect.Outset(1.0f, 1.0f);
- SetVertex(&attributes[0], outer_rect.x(), outer_rect.y(), color_);
- SetVertex(&attributes[1], outer_rect.right(), outer_rect.y(), color_);
- SetVertex(&attributes[2], outer_rect.right(), outer_rect.bottom(), color_);
- SetVertex(&attributes[3], outer_rect.x(), outer_rect.bottom(), color_);
+
+ RRectAttributes rrect[4];
+ GetRRectAttributes(outer_rect, rect_, corners_, rrect);
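+ // Emit two triangles (6 vertices) per rounded-rect region so all regions can
+ // be drawn with a single GL_TRIANGLES call.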
+ for (int r = 0, v = 0; r < arraysize(rrect); ++r) {
+ attributes[v ].position[0] = rrect[r].bounds.x();
+ attributes[v ].position[1] = rrect[r].bounds.y();
+ attributes[v ].rcorner =
+ RCorner(attributes[v ].position, rrect[r].rcorner);
+ attributes[v+1].position[0] = rrect[r].bounds.right();
+ attributes[v+1].position[1] = rrect[r].bounds.y();
+ attributes[v+1].rcorner =
+ RCorner(attributes[v+1].position, rrect[r].rcorner);
+ attributes[v+2].position[0] = rrect[r].bounds.x();
+ attributes[v+2].position[1] = rrect[r].bounds.bottom();
+ attributes[v+2].rcorner =
+ RCorner(attributes[v+2].position, rrect[r].rcorner);
+ attributes[v+3].position[0] = rrect[r].bounds.right();
+ attributes[v+3].position[1] = rrect[r].bounds.bottom();
+ attributes[v+3].rcorner =
+ RCorner(attributes[v+3].position, rrect[r].rcorner);
+ attributes[v+4] = attributes[v+1];
+ attributes[v+5] = attributes[v+2];
+ v += 6;
+ }
+
vertex_buffer_ = graphics_state->AllocateVertexData(sizeof(attributes));
SbMemoryCopy(vertex_buffer_, attributes, sizeof(attributes));
}
@@ -74,8 +87,8 @@
void DrawRRectColor::ExecuteRasterize(
GraphicsState* graphics_state,
ShaderProgramManager* program_manager) {
- ShaderProgram<ShaderVertexColorOffset,
- ShaderFragmentColorRrect>* program;
+ ShaderProgram<ShaderVertexRcorner,
+ ShaderFragmentRcornerColor>* program;
program_manager->GetProgram(&program);
graphics_state->UseProgram(program->GetHandle());
graphics_state->UpdateClipAdjustment(
@@ -90,23 +103,19 @@
sizeof(VertexAttributes), vertex_buffer_ +
offsetof(VertexAttributes, position));
graphics_state->VertexAttribPointer(
- program->GetVertexShader().a_color(), 4, GL_UNSIGNED_BYTE, GL_TRUE,
+ program->GetVertexShader().a_rcorner(), 4, GL_FLOAT, GL_FALSE,
sizeof(VertexAttributes), vertex_buffer_ +
- offsetof(VertexAttributes, color));
- graphics_state->VertexAttribPointer(
- program->GetVertexShader().a_offset(), 2, GL_FLOAT, GL_FALSE,
- sizeof(VertexAttributes), vertex_buffer_ +
- offsetof(VertexAttributes, offset));
+ offsetof(VertexAttributes, rcorner));
graphics_state->VertexAttribFinish();
- SetRRectUniforms(program->GetFragmentShader().u_rect(),
- program->GetFragmentShader().u_corners(),
- rect_, corners_, 0.5f);
- GL_CALL(glDrawArrays(GL_TRIANGLE_FAN, 0, kVertexCount));
+
+ GL_CALL(glUniform4f(program->GetFragmentShader().u_color(),
+ color_.r(), color_.g(), color_.b(), color_.a()));
+ GL_CALL(glDrawArrays(GL_TRIANGLES, 0, kVertexCount));
}
base::TypeId DrawRRectColor::GetTypeId() const {
- return ShaderProgram<ShaderVertexColorOffset,
- ShaderFragmentColorRrect>::GetTypeId();
+ return ShaderProgram<ShaderVertexRcorner,
+ ShaderFragmentRcornerColor>::GetTypeId();
}
} // namespace egl
diff --git a/src/cobalt/renderer/rasterizer/egl/draw_rrect_color.h b/src/cobalt/renderer/rasterizer/egl/draw_rrect_color.h
index c5cd055..de71e9e 100644
--- a/src/cobalt/renderer/rasterizer/egl/draw_rrect_color.h
+++ b/src/cobalt/renderer/rasterizer/egl/draw_rrect_color.h
@@ -41,9 +41,14 @@
base::TypeId GetTypeId() const OVERRIDE;
private:
+ struct VertexAttributes {
+ float position[2];
+ RCorner rcorner;
+ };
+
math::RectF rect_;
render_tree::RoundedCorners corners_;
- uint32_t color_;
+ render_tree::ColorRGBA color_;
uint8_t* vertex_buffer_;
};
diff --git a/src/cobalt/renderer/rasterizer/egl/draw_rrect_color_texture.cc b/src/cobalt/renderer/rasterizer/egl/draw_rrect_color_texture.cc
index 7ec422b..dbcbec1 100644
--- a/src/cobalt/renderer/rasterizer/egl/draw_rrect_color_texture.cc
+++ b/src/cobalt/renderer/rasterizer/egl/draw_rrect_color_texture.cc
@@ -27,22 +27,7 @@
namespace egl {
namespace {
-const int kVertexCount = 4;
-
-struct VertexAttributes {
- float position[2];
- float offset[2];
- float texcoord[2];
-};
-
-void SetVertex(VertexAttributes* vertex, float x, float y, float u, float v) {
- vertex->position[0] = x;
- vertex->position[1] = y;
- vertex->offset[0] = x;
- vertex->offset[1] = y;
- vertex->texcoord[0] = u;
- vertex->texcoord[1] = v;
-}
+const int kVertexCount = 4 * 6;
} // namespace
DrawRRectColorTexture::DrawRRectColorTexture(GraphicsState* graphics_state,
@@ -60,25 +45,58 @@
DCHECK(base_state_.rounded_scissor_corners);
color_ = GetDrawColor(color) * base_state_.opacity;
graphics_state->ReserveVertexData(kVertexCount * sizeof(VertexAttributes));
+
+ // Extract scale from the transform and move it into the vertex attributes
+ // so that the anti-aliased edges remain 1 pixel wide.
+ math::Vector2dF scale = RemoveScaleFromTransform();
+ rect_.Scale(scale.x(), scale.y());
+ base_state_.rounded_scissor_rect.Scale(scale.x(), scale.y());
+ base_state_.rounded_scissor_corners =
+ base_state_.rounded_scissor_corners->Scale(scale.x(), scale.y());
}
void DrawRRectColorTexture::ExecuteUpdateVertexBuffer(
GraphicsState* graphics_state,
ShaderProgramManager* program_manager) {
+ const float kWidthScale = 1.0f / rect_.width();
+ const float kHeightScale = 1.0f / rect_.height();
+
VertexAttributes attributes[kVertexCount];
- SetVertex(&attributes[0], rect_.x(), rect_.y(),
- texcoord_transform_(0, 2), texcoord_transform_(1, 2)); // uv = (0,0)
- SetVertex(&attributes[1], rect_.right(), rect_.y(),
- texcoord_transform_(0, 0) + texcoord_transform_(0, 2), // uv = (1,0)
- texcoord_transform_(1, 0) + texcoord_transform_(1, 2));
- SetVertex(&attributes[2], rect_.right(), rect_.bottom(),
- texcoord_transform_(0, 0) + texcoord_transform_(0, 1) + // uv = (1,1)
- texcoord_transform_(0, 2),
- texcoord_transform_(1, 0) + texcoord_transform_(1, 1) +
- texcoord_transform_(1, 2));
- SetVertex(&attributes[3], rect_.x(), rect_.bottom(),
- texcoord_transform_(0, 1) + texcoord_transform_(0, 2), // uv = (0,1)
- texcoord_transform_(1, 1) + texcoord_transform_(1, 2));
+ RRectAttributes rrect[4];
+ GetRRectAttributes(rect_, base_state_.rounded_scissor_rect,
+ *base_state_.rounded_scissor_corners, rrect);
+ for (int r = 0, v = 0; r < arraysize(rrect); ++r) {
+ attributes[v ].position[0] = rrect[r].bounds.x();
+ attributes[v ].position[1] = rrect[r].bounds.y();
+ attributes[v ].rcorner =
+ RCorner(attributes[v ].position, rrect[r].rcorner);
+ attributes[v+1].position[0] = rrect[r].bounds.right();
+ attributes[v+1].position[1] = rrect[r].bounds.y();
+ attributes[v+1].rcorner =
+ RCorner(attributes[v+1].position, rrect[r].rcorner);
+ attributes[v+2].position[0] = rrect[r].bounds.x();
+ attributes[v+2].position[1] = rrect[r].bounds.bottom();
+ attributes[v+2].rcorner =
+ RCorner(attributes[v+2].position, rrect[r].rcorner);
+ attributes[v+3].position[0] = rrect[r].bounds.right();
+ attributes[v+3].position[1] = rrect[r].bounds.bottom();
+ attributes[v+3].rcorner =
+ RCorner(attributes[v+3].position, rrect[r].rcorner);
+
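+ // Texture coordinates come from the vertex position normalized to |rect_|,
+ // then mapped through the texcoord transform.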
+ for (int t = v; t < v + 4; ++t) {
+ math::PointF texcoord(
+ (attributes[t].position[0] - rect_.x()) * kWidthScale,
+ (attributes[t].position[1] - rect_.y()) * kHeightScale);
+ texcoord = texcoord_transform_ * texcoord;
+ attributes[t].texcoord[0] = texcoord.x();
+ attributes[t].texcoord[1] = texcoord.y();
+ }
+
+ attributes[v+4] = attributes[v+1];
+ attributes[v+5] = attributes[v+2];
+ v += 6;
+ }
+
vertex_buffer_ = graphics_state->AllocateVertexData(sizeof(attributes));
SbMemoryCopy(vertex_buffer_, attributes, sizeof(attributes));
@@ -119,8 +137,8 @@
void DrawRRectColorTexture::ExecuteRasterize(
GraphicsState* graphics_state,
ShaderProgramManager* program_manager) {
- ShaderProgram<ShaderVertexOffsetTexcoord,
- ShaderFragmentTexcoordColorRrect>* program;
+ ShaderProgram<ShaderVertexRcornerTexcoord,
+ ShaderFragmentRcornerTexcoordColor>* program;
program_manager->GetProgram(&program);
graphics_state->UseProgram(program->GetHandle());
graphics_state->UpdateClipAdjustment(
@@ -135,19 +153,15 @@
sizeof(VertexAttributes), vertex_buffer_ +
offsetof(VertexAttributes, position));
graphics_state->VertexAttribPointer(
- program->GetVertexShader().a_offset(), 2, GL_FLOAT, GL_FALSE,
+ program->GetVertexShader().a_rcorner(), 4, GL_FLOAT, GL_FALSE,
sizeof(VertexAttributes), vertex_buffer_ +
- offsetof(VertexAttributes, offset));
+ offsetof(VertexAttributes, rcorner));
graphics_state->VertexAttribPointer(
program->GetVertexShader().a_texcoord(), 2, GL_FLOAT, GL_FALSE,
sizeof(VertexAttributes), vertex_buffer_ +
offsetof(VertexAttributes, texcoord));
graphics_state->VertexAttribFinish();
- SetRRectUniforms(program->GetFragmentShader().u_rect(),
- program->GetFragmentShader().u_corners(),
- base_state_.rounded_scissor_rect,
- *base_state_.rounded_scissor_corners, 0.5f);
GL_CALL(glUniform4f(program->GetFragmentShader().u_color(),
color_.r(), color_.g(), color_.b(), color_.a()));
GL_CALL(glUniform4fv(program->GetFragmentShader().u_texcoord_clamp(), 1,
@@ -157,7 +171,7 @@
graphics_state->ActiveBindTexture(
program->GetFragmentShader().u_texture_texunit(),
texture_->GetTarget(), texture_->gl_handle(), GL_REPEAT);
- GL_CALL(glDrawArrays(GL_TRIANGLE_FAN, 0, kVertexCount));
+ GL_CALL(glDrawArrays(GL_TRIANGLES, 0, kVertexCount));
graphics_state->ActiveBindTexture(
program->GetFragmentShader().u_texture_texunit(),
texture_->GetTarget(), texture_->gl_handle(), GL_CLAMP_TO_EDGE);
@@ -165,13 +179,13 @@
graphics_state->ActiveBindTexture(
program->GetFragmentShader().u_texture_texunit(),
texture_->GetTarget(), texture_->gl_handle());
- GL_CALL(glDrawArrays(GL_TRIANGLE_FAN, 0, kVertexCount));
+ GL_CALL(glDrawArrays(GL_TRIANGLES, 0, kVertexCount));
}
}
base::TypeId DrawRRectColorTexture::GetTypeId() const {
- return ShaderProgram<ShaderVertexOffsetTexcoord,
- ShaderFragmentTexcoordColorRrect>::GetTypeId();
+ return ShaderProgram<ShaderVertexRcornerTexcoord,
+ ShaderFragmentRcornerTexcoordColor>::GetTypeId();
}
} // namespace egl
diff --git a/src/cobalt/renderer/rasterizer/egl/draw_rrect_color_texture.h b/src/cobalt/renderer/rasterizer/egl/draw_rrect_color_texture.h
index c65347e..98a5c84 100644
--- a/src/cobalt/renderer/rasterizer/egl/draw_rrect_color_texture.h
+++ b/src/cobalt/renderer/rasterizer/egl/draw_rrect_color_texture.h
@@ -44,6 +44,12 @@
base::TypeId GetTypeId() const OVERRIDE;
private:
+ struct VertexAttributes {
+ float position[2];
+ float texcoord[2];
+ RCorner rcorner;
+ };
+
math::Matrix3F texcoord_transform_;
math::RectF rect_;
render_tree::ColorRGBA color_;
diff --git a/src/cobalt/renderer/rasterizer/egl/shader_program_manager.cc b/src/cobalt/renderer/rasterizer/egl/shader_program_manager.cc
index 0418992..74aba4b 100644
--- a/src/cobalt/renderer/rasterizer/egl/shader_program_manager.cc
+++ b/src/cobalt/renderer/rasterizer/egl/shader_program_manager.cc
@@ -17,6 +17,7 @@
#include <GLES2/gl2.h>
#include "cobalt/renderer/backend/egl/utils.h"
+#include "egl/generated_shader_impl.h"
namespace cobalt {
namespace renderer {
@@ -24,6 +25,13 @@
namespace egl {
ShaderProgramManager::ShaderProgramManager() {
+ // These shaders get instantiated during video playback when the user starts
+ // interacting with the transport controls. They are preloaded to prevent
+ // UI hiccups.
+ // The shader classes are declared in egl/generated_shader_impl.h.
+ Preload<ShaderVertexOffsetRcorner, ShaderFragmentColorBlurRrects>();
+ Preload<ShaderVertexColorOffset, ShaderFragmentColorInclude>();
+ Preload<ShaderVertexRcornerTexcoord, ShaderFragmentRcornerTexcoordColor>();
}
ShaderProgramManager::~ShaderProgramManager() {
diff --git a/src/cobalt/renderer/rasterizer/egl/shader_program_manager.h b/src/cobalt/renderer/rasterizer/egl/shader_program_manager.h
index f9c396e..7184e57 100644
--- a/src/cobalt/renderer/rasterizer/egl/shader_program_manager.h
+++ b/src/cobalt/renderer/rasterizer/egl/shader_program_manager.h
@@ -44,6 +44,9 @@
ShaderProgramBase* FindProgram(base::TypeId program_type_id);
void AddProgram(base::TypeId program_type_id, ShaderProgramBase* program);
+ template <typename VertexShaderT, typename FragmentShaderT>
+ void Preload();
+
typedef base::linked_hash_map<base::TypeId, ShaderProgramBase*> ProgramMap;
ProgramMap program_map_;
};
@@ -59,6 +62,12 @@
*out_program = base::polymorphic_downcast<ShaderProgramType*>(program);
}
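+// Instantiate the given shader program now so it is compiled and cached
+// before it is first needed for drawing.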
+template <typename VertexShaderT, typename FragmentShaderT>
+inline void ShaderProgramManager::Preload() {
+ ShaderProgram<VertexShaderT, FragmentShaderT>* program;
+ GetProgram(&program);
+}
+
} // namespace egl
} // namespace rasterizer
} // namespace renderer
diff --git a/src/cobalt/renderer/rasterizer/egl/shaders/fragment_color_between_rrects.glsl b/src/cobalt/renderer/rasterizer/egl/shaders/fragment_color_between_rrects.glsl
deleted file mode 100644
index f2897ad..0000000
--- a/src/cobalt/renderer/rasterizer/egl/shaders/fragment_color_between_rrects.glsl
+++ /dev/null
@@ -1,35 +0,0 @@
-// Copyright 2017 Google Inc. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-precision mediump float;
-
-// A rounded rect is represented by a vec4 specifying (min.xy, max.xy), and a
-// matrix of corners. Each vector in the matrix represents a corner (order:
-// top left, top right, bottom left, bottom right). Each corner vec4 represents
-// (start.xy, radius.xy).
-uniform vec4 u_inner_rect;
-uniform mat4 u_inner_corners;
-uniform vec4 u_outer_rect;
-uniform mat4 u_outer_corners;
-
-varying vec2 v_offset;
-varying vec4 v_color;
-
-#include "function_is_outside_rrect.inc"
-
-void main() {
- float inner_scale = IsOutsideRRect(v_offset, u_inner_rect, u_inner_corners);
- float outer_scale = IsOutsideRRect(v_offset, u_outer_rect, u_outer_corners);
- gl_FragColor = v_color * (inner_scale * (1.0 - outer_scale));
-}
diff --git a/src/cobalt/renderer/rasterizer/egl/shaders/fragment_color_blur.glsl b/src/cobalt/renderer/rasterizer/egl/shaders/fragment_color_blur.glsl
index 50ebd6a..4f15571 100644
--- a/src/cobalt/renderer/rasterizer/egl/shaders/fragment_color_blur.glsl
+++ b/src/cobalt/renderer/rasterizer/egl/shaders/fragment_color_blur.glsl
@@ -13,11 +13,12 @@
// limitations under the License.
precision mediump float;
+
+uniform vec4 u_color;
uniform vec4 u_blur_rect;
uniform vec2 u_scale_add;
varying vec2 v_offset;
-varying vec4 v_color;
#include "function_gaussian_integral.inc"
@@ -27,5 +28,5 @@
float integral = GaussianIntegral(u_blur_rect.xz - v_offset.xx) *
GaussianIntegral(u_blur_rect.yw - v_offset.yy);
float blur_scale = integral * u_scale_add.x + u_scale_add.y;
- gl_FragColor = v_color * blur_scale;
+ gl_FragColor = u_color * blur_scale;
}
diff --git a/src/cobalt/renderer/rasterizer/egl/shaders/fragment_color_blur_rrects.glsl b/src/cobalt/renderer/rasterizer/egl/shaders/fragment_color_blur_rrects.glsl
index ee8c4ae..efc8747 100644
--- a/src/cobalt/renderer/rasterizer/egl/shaders/fragment_color_blur_rrects.glsl
+++ b/src/cobalt/renderer/rasterizer/egl/shaders/fragment_color_blur_rrects.glsl
@@ -14,13 +14,6 @@
precision mediump float;
-// A rounded rect is represented by a vec4 specifying (min.xy, max.xy)
-// and a matrix of corners. Each vector in the matrix represents a corner
-// (order: top left, top right, bottom left, bottom right). Each corner vec4
-// represents (start.xy, radius.xy).
-uniform vec4 u_scissor_rect;
-uniform mat4 u_scissor_corners;
-
// The rounded spread rect is represented in a way to optimize calculation of
// the extents. Each element of a vec4 represents a corner's value -- order
// is top left, top right, bottom left, bottom right. Extents for each corner
@@ -49,14 +42,16 @@
// inset shadow with scissor rect behaving as an inclusive scissor.
uniform vec2 u_scale_add;
+uniform vec4 u_color;
+
// Blur calculations happen in terms of sigma distances. Use sigma_scale to
// translate pixel distances into sigma distances.
uniform vec2 u_sigma_scale;
varying vec2 v_offset;
-varying vec4 v_color;
+varying vec4 v_rcorner;
-#include "function_is_outside_rrect.inc"
+#include "function_is_outside_rcorner.inc"
#include "function_gaussian_integral.inc"
vec2 GetXExtents(float y) {
@@ -126,8 +121,7 @@
void main() {
float scissor_scale =
- IsOutsideRRect(v_offset, u_scissor_rect, u_scissor_corners) *
- u_scale_add.x + u_scale_add.y;
+ IsOutsideRCorner(v_rcorner) * u_scale_add.x + u_scale_add.y;
float blur_scale = GetBlur(v_offset) * u_scale_add.x + u_scale_add.y;
- gl_FragColor = v_color * (blur_scale * scissor_scale);
+ gl_FragColor = u_color * (blur_scale * scissor_scale);
}
diff --git a/src/cobalt/renderer/rasterizer/egl/shaders/fragment_color_rrect.glsl b/src/cobalt/renderer/rasterizer/egl/shaders/fragment_color_rrect.glsl
deleted file mode 100644
index 26549ed..0000000
--- a/src/cobalt/renderer/rasterizer/egl/shaders/fragment_color_rrect.glsl
+++ /dev/null
@@ -1,32 +0,0 @@
-// Copyright 2017 Google Inc. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-precision mediump float;
-
-// A rounded rect is represented by a vec4 specifying (min.xy, max.xy), and a
-// matrix of corners. Each vector in the matrix represents a corner (order:
-// top left, top right, bottom left, bottom right). Each corner vec4 represents
-// (start.xy, radius.xy).
-uniform vec4 u_rect;
-uniform mat4 u_corners;
-
-varying vec2 v_offset;
-varying vec4 v_color;
-
-#include "function_is_outside_rrect.inc"
-
-void main() {
- float scale = IsOutsideRRect(v_offset, u_rect, u_corners);
- gl_FragColor = v_color * (1.0 - scale);
-}
diff --git a/src/cobalt/renderer/rasterizer/egl/shaders/fragment_rcorner2_color.glsl b/src/cobalt/renderer/rasterizer/egl/shaders/fragment_rcorner2_color.glsl
new file mode 100644
index 0000000..c7a8601
--- /dev/null
+++ b/src/cobalt/renderer/rasterizer/egl/shaders/fragment_rcorner2_color.glsl
@@ -0,0 +1,26 @@
+// Copyright 2017 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+precision mediump float;
+
+uniform vec4 u_color;
+varying vec4 v_rcorner_inner;
+varying vec4 v_rcorner_outer;
+
+#include "function_is_outside_rcorner.inc"
+
+void main() {
+ float inner_scale = IsOutsideRCorner(v_rcorner_inner);
+ float outer_scale = 1.0 - IsOutsideRCorner(v_rcorner_outer);
+ gl_FragColor = u_color * (inner_scale * outer_scale);
+}
diff --git a/src/cobalt/renderer/rasterizer/egl/shaders/fragment_rcorner_color.glsl b/src/cobalt/renderer/rasterizer/egl/shaders/fragment_rcorner_color.glsl
new file mode 100644
index 0000000..02c2425
--- /dev/null
+++ b/src/cobalt/renderer/rasterizer/egl/shaders/fragment_rcorner_color.glsl
@@ -0,0 +1,25 @@
+// Copyright 2017 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+precision mediump float;
+
+uniform vec4 u_color;
+varying vec4 v_rcorner;
+
+#include "function_is_outside_rcorner.inc"
+
+void main() {
+ float scale = IsOutsideRCorner(v_rcorner);
+ gl_FragColor = u_color * (1.0 - scale);
+}
diff --git a/src/cobalt/renderer/rasterizer/egl/shaders/fragment_rcorner_texcoord_color.glsl b/src/cobalt/renderer/rasterizer/egl/shaders/fragment_rcorner_texcoord_color.glsl
new file mode 100644
index 0000000..4676317
--- /dev/null
+++ b/src/cobalt/renderer/rasterizer/egl/shaders/fragment_rcorner_texcoord_color.glsl
@@ -0,0 +1,30 @@
+// Copyright 2017 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+precision mediump float;
+
+uniform vec4 u_color;
+uniform vec4 u_texcoord_clamp;
+uniform sampler2D u_texture;
+
+varying vec4 v_rcorner;
+varying vec2 v_texcoord;
+
+#include "function_is_outside_rcorner.inc"
+
+void main() {
+ float scale = IsOutsideRCorner(v_rcorner);
+ gl_FragColor = u_color * (1.0 - scale) * texture2D(u_texture,
+ clamp(v_texcoord, u_texcoord_clamp.xy, u_texcoord_clamp.zw));
+}
diff --git a/src/cobalt/renderer/rasterizer/egl/shaders/fragment_texcoord_color_rrect.glsl b/src/cobalt/renderer/rasterizer/egl/shaders/fragment_texcoord_color_rrect.glsl
deleted file mode 100644
index 34a25ad..0000000
--- a/src/cobalt/renderer/rasterizer/egl/shaders/fragment_texcoord_color_rrect.glsl
+++ /dev/null
@@ -1,37 +0,0 @@
-// Copyright 2017 Google Inc. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-precision mediump float;
-
-uniform vec4 u_color;
-uniform vec4 u_texcoord_clamp;
-uniform sampler2D u_texture;
-
-// A rounded rect is represented by a vec4 specifying (min.xy, max.xy), and a
-// matrix of corners. Each vector in the matrix represents a corner (order:
-// top left, top right, bottom left, bottom right). Each corner vec4 represents
-// (start.xy, radius.xy).
-uniform vec4 u_rect;
-uniform mat4 u_corners;
-
-varying vec2 v_offset;
-varying vec2 v_texcoord;
-
-#include "function_is_outside_rrect.inc"
-
-void main() {
- float scale = IsOutsideRRect(v_offset, u_rect, u_corners);
- gl_FragColor = u_color * (1.0 - scale) * texture2D(u_texture,
- clamp(v_texcoord, u_texcoord_clamp.xy, u_texcoord_clamp.zw));
-}
diff --git a/src/cobalt/renderer/rasterizer/egl/shaders/function_is_outside_rcorner.inc b/src/cobalt/renderer/rasterizer/egl/shaders/function_is_outside_rcorner.inc
new file mode 100644
index 0000000..5233ae9
--- /dev/null
+++ b/src/cobalt/renderer/rasterizer/egl/shaders/function_is_outside_rcorner.inc
@@ -0,0 +1,37 @@
+// Copyright 2017 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Return 0 if the given position is inside the rounded corner, or scale
+// towards 1 as it goes outside a 1-pixel anti-aliasing border.
+// |rcorner| is a vec4 representing (scaled.xy, 1 / radius.xy) with scaled.xy
+// representing the offset of the current position in terms of radius.xy
+// (i.e. offset.xy / radius.xy). The scaled.xy values can be negative if the
+// current position is outside the corner start.
+float IsOutsideRCorner(vec4 rcorner) {
+ // Estimate the distance to an implicit function using
+ // dist = f(x,y) / length(gradient(f(x,y)))
+ // For an ellipse, f(x,y) = x^2 / a^2 + y^2 / b^2 - 1.
+ vec2 scaled = max(rcorner.xy, 0.0);
+ float implicit = dot(scaled, scaled) - 1.0;
+
+ // NOTE: To accommodate large radius values using mediump floats, rcorner.zw
+ // was scaled by kRCornerGradientScale in the vertex attribute data.
+ // Multiply inv_gradient by kRCornerGradientScale to undo that scaling.
+ const float kRCornerGradientScale = 16.0;
+ vec2 gradient = 2.0 * scaled * rcorner.zw;
+ float inv_gradient = kRCornerGradientScale *
+ inversesqrt(max(dot(gradient, gradient), 0.0001));
+
+ return clamp(0.5 + implicit * inv_gradient, 0.0, 1.0);
+}
diff --git a/src/cobalt/renderer/rasterizer/egl/shaders/function_is_outside_rrect.inc b/src/cobalt/renderer/rasterizer/egl/shaders/function_is_outside_rrect.inc
deleted file mode 100644
index e3e58b9..0000000
--- a/src/cobalt/renderer/rasterizer/egl/shaders/function_is_outside_rrect.inc
+++ /dev/null
@@ -1,51 +0,0 @@
-// Copyright 2017 Google Inc. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Return 0 if the given point is inside the rounded rect, or scale towards 1
-// as it goes outside a 1-pixel anti-aliasing border.
-// |rect| represents (min.xy, max.xy) of the encompassing rectangle.
-// |corners| is a matrix with each vec4 representing (start.xy, radius.xy) of
-// a corner. The order is top left, top right, bottom left, bottom right.
-float IsOutsideRRect(vec2 point, vec4 rect, mat4 corners) {
- vec4 select_corner = vec4(
- step(point.x, corners[0].x) * step(point.y, corners[0].y),
- step(corners[1].x, point.x) * step(point.y, corners[1].y),
- step(point.x, corners[2].x) * step(corners[2].y, point.y),
- step(corners[3].x, point.x) * step(corners[3].y, point.y));
- if (dot(select_corner, vec4(1.0)) > 0.5) {
- // Estimate the amount of anti-aliasing that should be used by comparing
- // x^2 / a^2 + y^2 / b^2 for the ellipse and ellipse + 1 pixel.
- vec4 corner = corners * select_corner;
- vec2 pixel_offset = point - corner.xy;
-
- if (abs(corner.z - corner.w) < 0.1) {
- // This is a square or round corner.
- return clamp(length(pixel_offset) - corner.z, 0.0, 1.0);
- }
-
- vec2 offset_min = pixel_offset / corner.zw;
- vec2 offset_max = pixel_offset / (corner.zw + vec2(1.0));
- float result_min = dot(offset_min, offset_min);
- float result_max = dot(offset_max, offset_max);
-
- // Return 1.0 if outside, or interpolate if in the border, or 0 if inside.
- return (result_max >= 1.0) ? 1.0 :
- max(result_min - 1.0, 0.0) / (result_min - result_max);
- }
-
- return clamp(rect.x - point.x, 0.0, 1.0) +
- clamp(point.x - rect.z, 0.0, 1.0) +
- clamp(rect.y - point.y, 0.0, 1.0) +
- clamp(point.y - rect.w, 0.0, 1.0);
-}
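
The removed whole-rect variant above first classifies the fragment into one of the four corner regions using products of step() calls, and only falls back to straight-edge distances when no corner matches. The same classification written out on the CPU, with illustrative names (step(edge, x) is 1.0 when x >= edge, else 0.0):

    // |starts| holds each corner's (start.x, start.y) in the same order the
    // shader used: top left, top right, bottom left, bottom right. Returns
    // the matching corner index, or -1 when the point lies in the central
    // region where only the straight rectangle edges matter.
    int ClassifyCorner(float px, float py, const float starts[4][2]) {
      if (px <= starts[0][0] && py <= starts[0][1]) return 0;  // top left
      if (px >= starts[1][0] && py <= starts[1][1]) return 1;  // top right
      if (px <= starts[2][0] && py >= starts[2][1]) return 2;  // bottom left
      if (px >= starts[3][0] && py >= starts[3][1]) return 3;  // bottom right
      return -1;
    }

Carrying per-corner data in the vertex attributes, as the new rcorner shaders do, presumably avoids this per-fragment branching entirely.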
diff --git a/src/cobalt/renderer/rasterizer/egl/shaders/shaders.gyp b/src/cobalt/renderer/rasterizer/egl/shaders/shaders.gyp
index 9d00899..2ebc954 100644
--- a/src/cobalt/renderer/rasterizer/egl/shaders/shaders.gyp
+++ b/src/cobalt/renderer/rasterizer/egl/shaders/shaders.gyp
@@ -20,22 +20,25 @@
'generate_class_script': '<(DEPTH)/cobalt/renderer/rasterizer/egl/shaders/generate_shader_impl.py',
'shader_sources': [
'<(DEPTH)/cobalt/renderer/rasterizer/egl/shaders/fragment_color.glsl',
- '<(DEPTH)/cobalt/renderer/rasterizer/egl/shaders/fragment_color_between_rrects.glsl',
'<(DEPTH)/cobalt/renderer/rasterizer/egl/shaders/fragment_color_blur.glsl',
'<(DEPTH)/cobalt/renderer/rasterizer/egl/shaders/fragment_color_blur_rrects.glsl',
'<(DEPTH)/cobalt/renderer/rasterizer/egl/shaders/fragment_color_include.glsl',
- '<(DEPTH)/cobalt/renderer/rasterizer/egl/shaders/fragment_color_rrect.glsl',
'<(DEPTH)/cobalt/renderer/rasterizer/egl/shaders/fragment_color_texcoord.glsl',
'<(DEPTH)/cobalt/renderer/rasterizer/egl/shaders/fragment_opacity_texcoord1d.glsl',
+ '<(DEPTH)/cobalt/renderer/rasterizer/egl/shaders/fragment_rcorner_color.glsl',
+ '<(DEPTH)/cobalt/renderer/rasterizer/egl/shaders/fragment_rcorner2_color.glsl',
+ '<(DEPTH)/cobalt/renderer/rasterizer/egl/shaders/fragment_rcorner_texcoord_color.glsl',
'<(DEPTH)/cobalt/renderer/rasterizer/egl/shaders/fragment_texcoord.glsl',
- '<(DEPTH)/cobalt/renderer/rasterizer/egl/shaders/fragment_texcoord_color_rrect.glsl',
'<(DEPTH)/cobalt/renderer/rasterizer/egl/shaders/function_gaussian_integral.inc',
- '<(DEPTH)/cobalt/renderer/rasterizer/egl/shaders/function_is_outside_rrect.inc',
+ '<(DEPTH)/cobalt/renderer/rasterizer/egl/shaders/function_is_outside_rcorner.inc',
'<(DEPTH)/cobalt/renderer/rasterizer/egl/shaders/vertex_color.glsl',
'<(DEPTH)/cobalt/renderer/rasterizer/egl/shaders/vertex_color_offset.glsl',
'<(DEPTH)/cobalt/renderer/rasterizer/egl/shaders/vertex_color_texcoord.glsl',
'<(DEPTH)/cobalt/renderer/rasterizer/egl/shaders/vertex_offset.glsl',
- '<(DEPTH)/cobalt/renderer/rasterizer/egl/shaders/vertex_offset_texcoord.glsl',
+ '<(DEPTH)/cobalt/renderer/rasterizer/egl/shaders/vertex_offset_rcorner.glsl',
+ '<(DEPTH)/cobalt/renderer/rasterizer/egl/shaders/vertex_rcorner.glsl',
+ '<(DEPTH)/cobalt/renderer/rasterizer/egl/shaders/vertex_rcorner2.glsl',
+ '<(DEPTH)/cobalt/renderer/rasterizer/egl/shaders/vertex_rcorner_texcoord.glsl',
'<(DEPTH)/cobalt/renderer/rasterizer/egl/shaders/vertex_texcoord.glsl',
],
},
diff --git a/src/cobalt/renderer/rasterizer/egl/shaders/vertex_offset_rcorner.glsl b/src/cobalt/renderer/rasterizer/egl/shaders/vertex_offset_rcorner.glsl
new file mode 100644
index 0000000..3d9ff46
--- /dev/null
+++ b/src/cobalt/renderer/rasterizer/egl/shaders/vertex_offset_rcorner.glsl
@@ -0,0 +1,28 @@
+// Copyright 2017 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+uniform vec4 u_clip_adjustment;
+uniform mat3 u_view_matrix;
+attribute vec2 a_position;
+attribute vec4 a_rcorner;
+varying vec2 v_offset;
+varying vec4 v_rcorner;
+
+void main() {
+ vec3 pos2d = u_view_matrix * vec3(a_position, 1);
+ gl_Position = vec4(pos2d.xy * u_clip_adjustment.xy +
+ u_clip_adjustment.zw, 0, pos2d.z);
+ v_offset = a_position;
+ v_rcorner = a_rcorner;
+}
diff --git a/src/cobalt/renderer/rasterizer/egl/shaders/vertex_offset_texcoord.glsl b/src/cobalt/renderer/rasterizer/egl/shaders/vertex_offset_texcoord.glsl
deleted file mode 100644
index df81d24..0000000
--- a/src/cobalt/renderer/rasterizer/egl/shaders/vertex_offset_texcoord.glsl
+++ /dev/null
@@ -1,29 +0,0 @@
-// Copyright 2017 Google Inc. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-uniform vec4 u_clip_adjustment;
-uniform mat3 u_view_matrix;
-attribute vec2 a_position;
-attribute vec2 a_offset;
-attribute vec2 a_texcoord;
-varying vec2 v_offset;
-varying vec2 v_texcoord;
-
-void main() {
- vec3 pos2d = u_view_matrix * vec3(a_position, 1);
- gl_Position = vec4(pos2d.xy * u_clip_adjustment.xy +
- u_clip_adjustment.zw, 0, pos2d.z);
- v_offset = a_offset;
- v_texcoord = a_texcoord;
-}
diff --git a/src/cobalt/renderer/rasterizer/egl/shaders/vertex_rcorner.glsl b/src/cobalt/renderer/rasterizer/egl/shaders/vertex_rcorner.glsl
new file mode 100644
index 0000000..a84d4f2
--- /dev/null
+++ b/src/cobalt/renderer/rasterizer/egl/shaders/vertex_rcorner.glsl
@@ -0,0 +1,26 @@
+// Copyright 2017 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+uniform vec4 u_clip_adjustment;
+uniform mat3 u_view_matrix;
+attribute vec2 a_position;
+attribute vec4 a_rcorner;
+varying vec4 v_rcorner;
+
+void main() {
+ vec3 pos2d = u_view_matrix * vec3(a_position, 1);
+ gl_Position = vec4(pos2d.xy * u_clip_adjustment.xy +
+ u_clip_adjustment.zw, 0, pos2d.z);
+ v_rcorner = a_rcorner;
+}
diff --git a/src/cobalt/renderer/rasterizer/egl/shaders/vertex_rcorner2.glsl b/src/cobalt/renderer/rasterizer/egl/shaders/vertex_rcorner2.glsl
new file mode 100644
index 0000000..fa4d155
--- /dev/null
+++ b/src/cobalt/renderer/rasterizer/egl/shaders/vertex_rcorner2.glsl
@@ -0,0 +1,28 @@
+// Copyright 2017 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+uniform vec4 u_clip_adjustment;
+uniform mat3 u_view_matrix;
+attribute vec2 a_position;
+attribute vec4 a_rcorner_inner;
+attribute vec4 a_rcorner_outer;
+varying vec4 v_rcorner_inner;
+varying vec4 v_rcorner_outer;
+
+void main() {
+ vec3 pos2d = u_view_matrix * vec3(a_position, 1);
+ gl_Position = vec4(pos2d.xy * u_clip_adjustment.xy +
+ u_clip_adjustment.zw, 0, pos2d.z);
+ v_rcorner_inner = a_rcorner_inner;
+ v_rcorner_outer = a_rcorner_outer;
+}
diff --git a/src/cobalt/renderer/rasterizer/egl/shaders/vertex_rcorner_texcoord.glsl b/src/cobalt/renderer/rasterizer/egl/shaders/vertex_rcorner_texcoord.glsl
new file mode 100644
index 0000000..103d1ab
--- /dev/null
+++ b/src/cobalt/renderer/rasterizer/egl/shaders/vertex_rcorner_texcoord.glsl
@@ -0,0 +1,29 @@
+// Copyright 2017 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+uniform vec4 u_clip_adjustment;
+uniform mat3 u_view_matrix;
+attribute vec2 a_position;
+attribute vec4 a_rcorner;
+attribute vec2 a_texcoord;
+varying vec4 v_rcorner;
+varying vec2 v_texcoord;
+
+void main() {
+ vec3 pos2d = u_view_matrix * vec3(a_position, 1);
+ gl_Position = vec4(pos2d.xy * u_clip_adjustment.xy +
+ u_clip_adjustment.zw, 0, pos2d.z);
+ v_rcorner = a_rcorner;
+ v_texcoord = a_texcoord;
+}
diff --git a/src/cobalt/renderer/rasterizer/pixel_test.cc b/src/cobalt/renderer/rasterizer/pixel_test.cc
index a3f66eb..760bb4d 100644
--- a/src/cobalt/renderer/rasterizer/pixel_test.cc
+++ b/src/cobalt/renderer/rasterizer/pixel_test.cc
@@ -216,6 +216,44 @@
RotateMatrix(static_cast<float>(M_PI) / 6.0f)));
}
+TEST_F(PixelTest, ScaledThenRotatedRectWithDifferentRoundedCorners) {
+ RoundedCorner top_left(6, 15);
+ RoundedCorner top_right(0, 0);
+ RoundedCorner bottom_right(6, 25);
+ RoundedCorner bottom_left(2, 25);
+
+ scoped_ptr<RoundedCorners> rounded_corners(
+ new RoundedCorners(top_left, top_right, bottom_right, bottom_left));
+
+ TestTree(new MatrixTransformNode(
+ new RectNode(RectF(-7, -25, 14, 50),
+ scoped_ptr<Brush>(
+ new SolidColorBrush(ColorRGBA(1, 1, 1, 1))),
+ rounded_corners.Pass()),
+ TranslateMatrix(100.0f, 100.0f) *
+ RotateMatrix(static_cast<float>(M_PI) / 3.0f) *
+ ScaleMatrix(-10.0f, 2.0f)));
+}
+
+TEST_F(PixelTest, RotatedThenScaledRectWithDifferentRoundedCorners) {
+ RoundedCorner top_left(4, 7);
+ RoundedCorner top_right(0, 0);
+ RoundedCorner bottom_right(10, 2);
+ RoundedCorner bottom_left(5, 3);
+
+ scoped_ptr<RoundedCorners> rounded_corners(
+ new RoundedCorners(top_left, top_right, bottom_right, bottom_left));
+
+ TestTree(new MatrixTransformNode(
+ new RectNode(RectF(-10, -7, 20, 14),
+ scoped_ptr<Brush>(
+ new SolidColorBrush(ColorRGBA(1, 1, 1, 1))),
+ rounded_corners.Pass()),
+ TranslateMatrix(100.0f, 100.0f) *
+ ScaleMatrix(6.0f, 9.0f) *
+ RotateMatrix(static_cast<float>(M_PI) / 6.0f)));
+}
+
TEST_F(PixelTest, RedRectWithDifferentRoundedCornersOnTopLeftOfSurface) {
RoundedCorner top_left(10, 10);
RoundedCorner top_right(20, 20);
@@ -1952,6 +1990,19 @@
new ImageNode(image)));
}
+TEST_F(PixelTest, ScaledThenRotatedRoundedCornersViewportOverImage) {
+ scoped_refptr<Image> image =
+ CreateColoredCheckersImage(GetResourceProvider(), output_surface_size());
+
+ TestTree(new MatrixTransformNode(
+ new FilterNode(
+ ViewportFilter(RectF(25, 5, 150, 10), RoundedCorners(25, 2)),
+ new ImageNode(image)),
+ TranslateMatrix(-30, 130) *
+ RotateMatrix(static_cast<float>(M_PI / 3.0f)) *
+ ScaleMatrix(1.0f, 10.0f)));
+}
+
TEST_F(PixelTest, RoundedCornersViewportOverWrappingImage) {
scoped_refptr<Image> image =
CreateColoredCheckersImage(GetResourceProvider(), output_surface_size());
@@ -2829,6 +2880,14 @@
Shadow(Vector2dF(0.0f, 0.0f), 100.0f, ColorRGBA(0, 0, 0, 1))));
}
+TEST_F(PixelTest, ScaledBoxShadowWithSpreadAndBlurCentered) {
+ TestTree(new MatrixTransformNode(CreateShadowRectWithBackground(
+ output_surface_size(), ColorRGBA(1.0f, 1.0f, 1.0f, 1.0f),
+ ColorRGBA(0.5f, 0.5f, 0.5f, 1.0f), RectF(50, 8, 100, 4),
+ Shadow(Vector2dF(0.0f, 0.0f), 3.0f, ColorRGBA(0, 0, 0, 1)), false, 5.0f),
+ ScaleMatrix(1.0f, 10.0f)));
+}
+
TEST_F(PixelTest, TransparentBoxShadowBlurOnGreenBackgroundCentered) {
TestTree(CreateShadowRectWithBackground(
output_surface_size(), ColorRGBA(0.3f, 0.8f, 0.3f, 1.0f),
@@ -2916,6 +2975,15 @@
RoundedCorners(25, 25)));
}
+TEST_F(PixelTest, ScaledBoxShadowEllipseWithOutset5pxSpreadAndRoundedCorners) {
+ TestTree(new MatrixTransformNode(CreateShadowRectWithBackground(
+ output_surface_size(), ColorRGBA(1.0f, 1.0f, 1.0f, 1.0f),
+ ColorRGBA(0.5f, 0.5f, 0.5f, 1.0f), RectF(6, 25, 2, 100),
+ Shadow(Vector2dF(0.0f, 0.0f), 0.0f, ColorRGBA(0, 0, 0, 1)), false, 5.0f,
+ RoundedCorners(1, 50)),
+ ScaleMatrix(15.0f, 1.0f)));
+}
+
TEST_F(PixelTest, BoxShadowCircleWithInset25pxSpread1pxBlurAndRoundedCorners) {
TestTree(CreateShadowRectWithBackground(
output_surface_size(), ColorRGBA(1.0f, 1.0f, 1.0f, 1.0f),
@@ -3034,6 +3102,16 @@
}
TEST_F(PixelTest,
+ ScaledBoxShadowEllipseWithOutset25pxSpread3pxBlurAndRoundedCorners) {
+ TestTree(new MatrixTransformNode(CreateShadowRectWithBackground(
+ output_surface_size(), ColorRGBA(1.0f, 1.0f, 1.0f, 1.0f),
+ ColorRGBA(0.5f, 0.5f, 0.5f, 1.0f), RectF(20, 5, 140, 10),
+ Shadow(Vector2dF(8.0f, 1.0f), 3.0f, ColorRGBA(0, 0, 0, 1)), false, 4.0f,
+ RoundedCorners(70, 5)),
+ ScaleMatrix(1.0f, 10.0f)));
+}
+
+TEST_F(PixelTest,
BoxShadowEllipseWithInset25pxSpread50pxBlurAndRoundedCorners) {
TestTree(CreateShadowRectWithBackground(
output_surface_size(), ColorRGBA(1.0f, 1.0f, 1.0f, 1.0f),
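
The new tests compose their transforms right to left, so the factor written last is applied to the rectangle first; that ordering is exactly what distinguishes the ScaledThenRotated and RotatedThenScaled cases. A small sketch of the same composition applied to one rectangle corner (illustrative helpers, not the render_tree API):

    #include <cmath>
    #include <cstdio>

    struct Vec2 { float x, y; };

    Vec2 Scale(Vec2 p, float sx, float sy) { return {p.x * sx, p.y * sy}; }
    Vec2 Rotate(Vec2 p, float radians) {
      return {p.x * std::cos(radians) - p.y * std::sin(radians),
              p.x * std::sin(radians) + p.y * std::cos(radians)};
    }
    Vec2 Translate(Vec2 p, float tx, float ty) {
      return {p.x + tx, p.y + ty};
    }

    int main() {
      Vec2 corner = {-7.0f, -25.0f};  // Top-left of RectF(-7, -25, 14, 50).
      // Translate * Rotate * Scale: the scale acts on the rect first.
      Vec2 scaled_then_rotated = Translate(
          Rotate(Scale(corner, -10.0f, 2.0f),
                 static_cast<float>(M_PI) / 3.0f),
          100.0f, 100.0f);
      std::printf("(%f, %f)\n", scaled_then_rotated.x, scaled_then_rotated.y);
      return 0;
    }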
diff --git a/src/cobalt/renderer/rasterizer/skia/hardware_resource_provider.cc b/src/cobalt/renderer/rasterizer/skia/hardware_resource_provider.cc
index 576969c..ce39cca 100644
--- a/src/cobalt/renderer/rasterizer/skia/hardware_resource_provider.cc
+++ b/src/cobalt/renderer/rasterizer/skia/hardware_resource_provider.cc
@@ -60,7 +60,7 @@
// on multiple threads simultaneously later.
SkSafeUnref(SkFontMgr::RefDefault());
-#if SB_API_VERSION >= 4 && SB_HAS(GRAPHICS)
+#if SB_HAS(GRAPHICS)
decode_target_graphics_context_provider_.egl_display =
cobalt_context_->system_egl()->GetDisplay();
decode_target_graphics_context_provider_.egl_context =
@@ -68,7 +68,7 @@
decode_target_graphics_context_provider_.gles_context_runner =
&HardwareResourceProvider::GraphicsContextRunner;
decode_target_graphics_context_provider_.gles_context_runner_context = this;
-#endif // SB_API_VERSION >= 4 && SB_HAS(GRAPHICS)
+#endif // SB_HAS(GRAPHICS)
}
HardwareResourceProvider::~HardwareResourceProvider() {
@@ -146,7 +146,7 @@
self_message_loop_));
}
-#if SB_API_VERSION >= 4 && SB_HAS(GRAPHICS)
+#if SB_HAS(GRAPHICS)
namespace {
#if SB_API_VERSION < SB_DECODE_TARGET_PLANES_FOR_FORMAT
@@ -378,7 +378,7 @@
}
}
-#endif // SB_API_VERSION >= 4 && SB_HAS(GRAPHICS)
+#endif // SB_HAS(GRAPHICS)
scoped_ptr<RawImageMemory> HardwareResourceProvider::AllocateRawImageMemory(
size_t size_in_bytes, size_t alignment) {
diff --git a/src/cobalt/renderer/rasterizer/skia/hardware_resource_provider.h b/src/cobalt/renderer/rasterizer/skia/hardware_resource_provider.h
index 33ed4d2..5ad0c4a 100644
--- a/src/cobalt/renderer/rasterizer/skia/hardware_resource_provider.h
+++ b/src/cobalt/renderer/rasterizer/skia/hardware_resource_provider.h
@@ -56,7 +56,6 @@
scoped_ptr<render_tree::ImageData> pixel_data) OVERRIDE;
#if SB_HAS(GRAPHICS)
-#if SB_API_VERSION >= 4
scoped_refptr<render_tree::Image> CreateImageFromSbDecodeTarget(
SbDecodeTarget decode_target) OVERRIDE;
@@ -71,24 +70,6 @@
// Whether SbDecodeTargetIsSupported or not.
bool SupportsSbDecodeTarget() OVERRIDE { return true; }
-#elif SB_API_VERSION >= 3
-
- scoped_refptr<render_tree::Image> CreateImageFromSbDecodeTarget(
- SbDecodeTarget decode_target) OVERRIDE {
- NOTREACHED()
- << "CreateImageFromSbDecodeTarget is not supported on EGL yet.";
- SbDecodeTargetDestroy(decode_target);
- return NULL;
- }
-
- // Return the associated SbDecodeTargetProvider with the ResourceProvider,
- // if it exists. Returns NULL if SbDecodeTarget is not supported.
- SbDecodeTargetProvider* GetSbDecodeTargetProvider() OVERRIDE { return NULL; }
-
- // Whether SbDecodeTargetIsSupported or not.
- bool SupportsSbDecodeTarget() OVERRIDE { return false; }
-
-#endif // SB_API_VERSION >= 4
#endif // SB_HAS(GRAPHICS)
scoped_ptr<render_tree::RawImageMemory> AllocateRawImageMemory(
@@ -147,7 +128,7 @@
TextShaper text_shaper_;
int max_texture_size_;
-#if SB_API_VERSION >= 4 && SB_HAS(GRAPHICS)
+#if SB_HAS(GRAPHICS)
static void GraphicsContextRunner(
SbDecodeTargetGraphicsContextProvider* graphics_context_provider,
SbDecodeTargetGlesContextRunnerTarget target_function,
@@ -155,7 +136,7 @@
SbDecodeTargetGraphicsContextProvider
decode_target_graphics_context_provider_;
-#endif // SB_API_VERSION >= 4 && SB_HAS(GRAPHICS)
+#endif // SB_HAS(GRAPHICS)
// We keep a handle to the message loop that this resource provider was
// created on. This message loop is used whenever we need to issue graphics
diff --git a/src/cobalt/renderer/rasterizer/skia/render_tree_node_visitor.cc b/src/cobalt/renderer/rasterizer/skia/render_tree_node_visitor.cc
index 0f2159b..918a60e 100644
--- a/src/cobalt/renderer/rasterizer/skia/render_tree_node_visitor.cc
+++ b/src/cobalt/renderer/rasterizer/skia/render_tree_node_visitor.cc
@@ -949,6 +949,11 @@
SkiaBrushVisitor brush_visitor(&paint, *draw_state);
brush->Accept(&brush_visitor);
+ if (!draw_state->render_target->getTotalMatrix().preservesAxisAlignment()) {
+ // Enable anti-aliasing if we're rendering a rotated or skewed box.
+ paint.setAntiAlias(true);
+ }
+
draw_state->render_target->drawRect(
SkRect::MakeXYWH(rect.x(), rect.y(), rect.width(), rect.height()), paint);
}
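
Anti-aliasing is now forced on whenever the accumulated canvas matrix no longer preserves axis alignment, since the edges of a rotated or skewed box cannot land exactly on pixel boundaries. A rough sketch of the property being tested, for the 2x2 linear part of an affine transform and ignoring degenerate matrices (this is not the Skia implementation):

    #include <cmath>

    // An affine transform keeps axis-aligned rectangles axis-aligned when its
    // linear part [a b; c d] is either diagonal (scales, possibly mirrored)
    // or anti-diagonal (a 90-degree rotation combined with scales).
    bool PreservesAxisAlignment(float a, float b, float c, float d) {
      const float kEpsilon = 1e-6f;
      bool diagonal = std::fabs(b) < kEpsilon && std::fabs(c) < kEpsilon;
      bool anti_diagonal = std::fabs(a) < kEpsilon && std::fabs(d) < kEpsilon;
      return diagonal || anti_diagonal;
    }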
diff --git a/src/cobalt/renderer/rasterizer/skia/software_resource_provider.h b/src/cobalt/renderer/rasterizer/skia/software_resource_provider.h
index 67e8e2d..492d3ab 100644
--- a/src/cobalt/renderer/rasterizer/skia/software_resource_provider.h
+++ b/src/cobalt/renderer/rasterizer/skia/software_resource_provider.h
@@ -47,7 +47,6 @@
scoped_ptr<render_tree::ImageData> pixel_data) OVERRIDE;
#if SB_HAS(GRAPHICS)
-#if SB_API_VERSION >= 4
scoped_refptr<render_tree::Image> CreateImageFromSbDecodeTarget(
SbDecodeTarget decode_target) OVERRIDE {
NOTREACHED();
@@ -61,18 +60,6 @@
}
bool SupportsSbDecodeTarget() OVERRIDE { return false; }
-#elif SB_API_VERSION >= 3
- scoped_refptr<render_tree::Image> CreateImageFromSbDecodeTarget(
- SbDecodeTarget decode_target) OVERRIDE {
- NOTREACHED();
- SbDecodeTargetDestroy(decode_target);
- return NULL;
- }
-
- SbDecodeTargetProvider* GetSbDecodeTargetProvider() OVERRIDE { return NULL; }
-
- bool SupportsSbDecodeTarget() OVERRIDE { return false; }
-#endif // SB_API_VERSION >= 4
#endif // SB_HAS(GRAPHICS)
scoped_ptr<render_tree::RawImageMemory> AllocateRawImageMemory(
diff --git a/src/cobalt/renderer/rasterizer/testdata/RotatedThenScaledRectWithDifferentRoundedCorners-expected.png b/src/cobalt/renderer/rasterizer/testdata/RotatedThenScaledRectWithDifferentRoundedCorners-expected.png
new file mode 100644
index 0000000..b1725a0
--- /dev/null
+++ b/src/cobalt/renderer/rasterizer/testdata/RotatedThenScaledRectWithDifferentRoundedCorners-expected.png
Binary files differ
diff --git a/src/cobalt/renderer/rasterizer/testdata/ScaledBoxShadowEllipseWithOutset25pxSpread3pxBlurAndRoundedCorners-expected.png b/src/cobalt/renderer/rasterizer/testdata/ScaledBoxShadowEllipseWithOutset25pxSpread3pxBlurAndRoundedCorners-expected.png
new file mode 100644
index 0000000..94a9b2f
--- /dev/null
+++ b/src/cobalt/renderer/rasterizer/testdata/ScaledBoxShadowEllipseWithOutset25pxSpread3pxBlurAndRoundedCorners-expected.png
Binary files differ
diff --git a/src/cobalt/renderer/rasterizer/testdata/ScaledBoxShadowEllipseWithOutset5pxSpreadAndRoundedCorners-expected.png b/src/cobalt/renderer/rasterizer/testdata/ScaledBoxShadowEllipseWithOutset5pxSpreadAndRoundedCorners-expected.png
new file mode 100644
index 0000000..26f22df
--- /dev/null
+++ b/src/cobalt/renderer/rasterizer/testdata/ScaledBoxShadowEllipseWithOutset5pxSpreadAndRoundedCorners-expected.png
Binary files differ
diff --git a/src/cobalt/renderer/rasterizer/testdata/ScaledBoxShadowWithSpreadAndBlurCentered-expected.png b/src/cobalt/renderer/rasterizer/testdata/ScaledBoxShadowWithSpreadAndBlurCentered-expected.png
new file mode 100644
index 0000000..9c2b5b6
--- /dev/null
+++ b/src/cobalt/renderer/rasterizer/testdata/ScaledBoxShadowWithSpreadAndBlurCentered-expected.png
Binary files differ
diff --git a/src/cobalt/renderer/rasterizer/testdata/ScaledThenRotatedRectWithDifferentRoundedCorners-expected.png b/src/cobalt/renderer/rasterizer/testdata/ScaledThenRotatedRectWithDifferentRoundedCorners-expected.png
new file mode 100644
index 0000000..9b62917
--- /dev/null
+++ b/src/cobalt/renderer/rasterizer/testdata/ScaledThenRotatedRectWithDifferentRoundedCorners-expected.png
Binary files differ
diff --git a/src/cobalt/renderer/rasterizer/testdata/ScaledThenRotatedRoundedCornersViewportOverImage-expected.png b/src/cobalt/renderer/rasterizer/testdata/ScaledThenRotatedRoundedCornersViewportOverImage-expected.png
new file mode 100644
index 0000000..55c2d7d
--- /dev/null
+++ b/src/cobalt/renderer/rasterizer/testdata/ScaledThenRotatedRoundedCornersViewportOverImage-expected.png
Binary files differ
diff --git a/src/cobalt/script/error_report.h b/src/cobalt/script/error_report.h
new file mode 100644
index 0000000..82c1b12
--- /dev/null
+++ b/src/cobalt/script/error_report.h
@@ -0,0 +1,41 @@
+// Copyright 2017 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef COBALT_SCRIPT_ERROR_REPORT_H_
+#define COBALT_SCRIPT_ERROR_REPORT_H_
+
+#include <string>
+
+#include "base/memory/scoped_ptr.h"
+#include "cobalt/script/value_handle.h"
+
+namespace cobalt {
+namespace script {
+
+struct ErrorReport {
+ public:
+ ErrorReport() : line_number(0), column_number(0), is_muted(false) {}
+
+ std::string message;
+ std::string filename;
+ uint32 line_number;
+ uint32 column_number;
+ scoped_ptr<script::ValueHandleHolder> error;
+ bool is_muted;
+};
+
+} // namespace script
+} // namespace cobalt
+
+#endif // COBALT_SCRIPT_ERROR_REPORT_H_
diff --git a/src/cobalt/script/fake_global_environment.h b/src/cobalt/script/fake_global_environment.h
index ca8c3fa..391c5e7 100644
--- a/src/cobalt/script/fake_global_environment.h
+++ b/src/cobalt/script/fake_global_environment.h
@@ -49,6 +49,8 @@
void EnableEval() OVERRIDE {}
void DisableJit() OVERRIDE {}
void SetReportEvalCallback(const base::Closure& /*report_eval*/) OVERRIDE {}
+ void SetReportErrorCallback(
+      const ReportErrorCallback& /*report_error*/) OVERRIDE {}
void Bind(const std::string& /*identifier*/,
const scoped_refptr<Wrappable>& /*impl*/) OVERRIDE {}
ScriptValueFactory* script_value_factory() { return NULL; }
diff --git a/src/cobalt/script/global_environment.h b/src/cobalt/script/global_environment.h
index 794cc22..039f05d 100644
--- a/src/cobalt/script/global_environment.h
+++ b/src/cobalt/script/global_environment.h
@@ -19,6 +19,7 @@
#include "base/memory/ref_counted.h"
#include "base/optional.h"
+#include "cobalt/script/error_report.h"
#include "cobalt/script/opaque_handle.h"
#include "cobalt/script/script_value.h"
#include "cobalt/script/script_value_factory.h"
@@ -35,6 +36,9 @@
// Manages a handle to a JavaScript engine's global object.
class GlobalEnvironment : public base::RefCounted<GlobalEnvironment> {
public:
+ typedef base::Callback<bool(const ErrorReport& error_report)>
+ ReportErrorCallback;
+
// Create a new global object with bindings as defined for the definition of
// the GlobalInterface type. The IDL for this interface must have the
// PrimaryGlobal or Global extended attribute.
@@ -95,6 +99,10 @@
// constructor is used.
virtual void SetReportEvalCallback(const base::Closure& report_eval) = 0;
+ // Set a callback that will be fired whenever a JavaScript error occurs.
+ virtual void SetReportErrorCallback(
+ const ReportErrorCallback& report_error) = 0;
+
// Dynamically bind a cpp object to the javascript global object with the
// supplied identifier.
// This method is useful for testing and debug purposes, as well as for
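
A caller wires the new hook up by handing GlobalEnvironment a base::Callback<bool(const ErrorReport&)>; returning true tells the engine the error was handled, which suppresses its own logging. A hypothetical registration, where the handler name and its logging are illustrative only:

    #include "base/bind.h"
    #include "base/logging.h"
    #include "cobalt/script/error_report.h"
    #include "cobalt/script/global_environment.h"

    namespace {

    bool HandleScriptError(const cobalt::script::ErrorReport& error_report) {
      LOG(ERROR) << error_report.filename << ":" << error_report.line_number
                 << ":" << error_report.column_number << ": "
                 << error_report.message;
      return false;  // Not handled; the engine may still report it.
    }

    }  // namespace

    void InstallErrorHandler(cobalt::script::GlobalEnvironment* environment) {
      environment->SetReportErrorCallback(base::Bind(&HandleScriptError));
    }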
diff --git a/src/cobalt/script/mozjs-45/mozjs_global_environment.cc b/src/cobalt/script/mozjs-45/mozjs_global_environment.cc
index c850506..2673778 100644
--- a/src/cobalt/script/mozjs-45/mozjs_global_environment.cc
+++ b/src/cobalt/script/mozjs-45/mozjs_global_environment.cc
@@ -339,6 +339,12 @@
report_eval_ = report_eval;
}
+void MozjsGlobalEnvironment::SetReportErrorCallback(
+ const ReportErrorCallback& report_error_callback) {
+ DCHECK(thread_checker_.CalledOnValidThread());
+ report_error_callback_ = report_error_callback;
+}
+
void MozjsGlobalEnvironment::Bind(const std::string& identifier,
const scoped_refptr<Wrappable>& impl) {
TRACK_MEMORY_SCOPE("Javascript");
@@ -456,19 +462,54 @@
void MozjsGlobalEnvironment::ReportError(const char* message,
JSErrorReport* report) {
- std::string error_message;
+ JS::RootedValue exception(context_);
+ ::JS_GetPendingException(context_, &exception);
+
+  // Note: we must do this before running any more code on the context.
+ ::JS_ClearPendingException(context_);
+
+ // Populate the error report.
+ ErrorReport error_report;
if (report->errorNumber == JSMSG_CSP_BLOCKED_EVAL) {
- error_message = eval_disabled_message_.value_or(message);
+ error_report.message = eval_disabled_message_.value_or(message);
} else {
- error_message = message;
+ error_report.message = message;
+ }
+ error_report.filename =
+ report->filename ? report->filename : "<internal exception>";
+ error_report.line_number = report->lineno;
+ error_report.column_number = report->column;
+ // Let error object be the object that represents the error: in the case of
+ // an uncaught exception, that would be the object that was thrown; in the
+ // case of a JavaScript error that would be an Error object. If there is no
+ // corresponding object, then the null value must be used instead.
+ // https://www.w3.org/TR/html5/webappapis.html#runtime-script-errors
+ if (exception.isObject()) {
+ error_report.error.reset(
+ new MozjsValueHandleHolder(exception, context_, wrapper_factory()));
+ }
+ error_report.is_muted = report->isMuted;
+
+ // If this isn't simply a warning, and the error wasn't caused by JS running
+ // out of memory (in which case the callback will fail as well), then run
+  // the callback. If the callback reports that the error was handled, simply
+  // return; the error should only be reported to the user if it wasn't
+ // handled.
+ if (!JSREPORT_IS_WARNING(report->flags) &&
+ report->errorNumber != JSMSG_OUT_OF_MEMORY &&
+ !report_error_callback_.is_null() &&
+ report_error_callback_.Run(error_report)) {
+ return;
}
+ // If the error is not handled, then the error may be reported to the user.
+ // https://www.w3.org/TR/html5/webappapis.html#runtime-script-errors-in-documents
if (last_error_message_) {
- *last_error_message_ = error_message;
+ *last_error_message_ = error_report.message;
} else {
- const char* filename = report->filename ? report->filename : "(none)";
- LOG(ERROR) << "JS Error: " << filename << ":" << report->lineno << ":"
- << report->column << ": " << error_message;
+ LOG(ERROR) << "JS Error: " << error_report.filename << ":"
+ << error_report.line_number << ":" << error_report.column_number
+ << ": " << error_report.message;
}
}
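
The is_muted flag matters to whoever implements the callback: under the runtime-script-errors rules cited in the comments above, muted errors (typically from cross-origin scripts) must not leak details to page-level handlers. A sketch of that sanitization step, assuming a hypothetical handler like the registration example shown earlier:

    #include "cobalt/script/error_report.h"

    // Hypothetical pre-processing inside a ReportErrorCallback implementation.
    void SanitizeIfMuted(cobalt::script::ErrorReport* error_report) {
      if (error_report->is_muted) {
        // Expose only a generic message, as the spec requires for errors from
        // scripts the document is not allowed to inspect.
        error_report->message = "Script error.";
        error_report->filename.clear();
        error_report->line_number = 0;
        error_report->column_number = 0;
        error_report->error.reset();
      }
    }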
diff --git a/src/cobalt/script/mozjs-45/mozjs_global_environment.h b/src/cobalt/script/mozjs-45/mozjs_global_environment.h
index e34eabe..a7bccae 100644
--- a/src/cobalt/script/mozjs-45/mozjs_global_environment.h
+++ b/src/cobalt/script/mozjs-45/mozjs_global_environment.h
@@ -75,6 +75,9 @@
void SetReportEvalCallback(const base::Closure& report_eval) OVERRIDE;
+ void SetReportErrorCallback(
+ const ReportErrorCallback& report_error_callback) OVERRIDE;
+
void Bind(const std::string& identifier,
const scoped_refptr<Wrappable>& impl) OVERRIDE;
@@ -182,6 +185,7 @@
bool eval_enabled_;
base::optional<std::string> eval_disabled_message_;
base::Closure report_eval_;
+ ReportErrorCallback report_error_callback_;
friend class GlobalObjectProxy;
};
diff --git a/src/cobalt/script/mozjs/mozjs_global_environment.cc b/src/cobalt/script/mozjs/mozjs_global_environment.cc
index 71b593d..52af1fe 100644
--- a/src/cobalt/script/mozjs/mozjs_global_environment.cc
+++ b/src/cobalt/script/mozjs/mozjs_global_environment.cc
@@ -344,6 +344,12 @@
report_eval_ = report_eval;
}
+void MozjsGlobalEnvironment::SetReportErrorCallback(
+ const ReportErrorCallback& report_error_callback) {
+ DCHECK(thread_checker_.CalledOnValidThread());
+ report_error_callback_ = report_error_callback;
+}
+
void MozjsGlobalEnvironment::Bind(const std::string& identifier,
const scoped_refptr<Wrappable>& impl) {
TRACK_MEMORY_SCOPE("Javascript");
diff --git a/src/cobalt/script/mozjs/mozjs_global_environment.h b/src/cobalt/script/mozjs/mozjs_global_environment.h
index 4914e18..e221a5d 100644
--- a/src/cobalt/script/mozjs/mozjs_global_environment.h
+++ b/src/cobalt/script/mozjs/mozjs_global_environment.h
@@ -74,6 +74,9 @@
void SetReportEvalCallback(const base::Closure& report_eval) OVERRIDE;
+ void SetReportErrorCallback(
+ const ReportErrorCallback& report_error_callback) OVERRIDE;
+
void Bind(const std::string& identifier,
const scoped_refptr<Wrappable>& impl) OVERRIDE;
@@ -181,6 +184,7 @@
bool eval_enabled_;
base::optional<std::string> eval_disabled_message_;
base::Closure report_eval_;
+ ReportErrorCallback report_error_callback_;
friend class GlobalObjectProxy;
};
diff --git a/src/cobalt/script/script.gyp b/src/cobalt/script/script.gyp
index a7e9192..34a7c4e 100644
--- a/src/cobalt/script/script.gyp
+++ b/src/cobalt/script/script.gyp
@@ -21,6 +21,7 @@
'type': 'static_library',
'sources': [
'call_frame.h',
+ 'error_report.h',
'exception_message.cc',
'exception_message.h',
'execution_state.cc',
diff --git a/src/cobalt/speech/google_speech_service.cc b/src/cobalt/speech/google_speech_service.cc
index 23dedfd..6fb7a34 100644
--- a/src/cobalt/speech/google_speech_service.cc
+++ b/src/cobalt/speech/google_speech_service.cc
@@ -290,14 +290,12 @@
const char* speech_api_key = "";
#if defined(OS_STARBOARD)
-#if SB_API_VERSION >= 2
const int kSpeechApiKeyLength = 100;
char buffer[kSpeechApiKeyLength] = {0};
bool result = SbSystemGetProperty(kSbSystemPropertySpeechApiKey, buffer,
SB_ARRAY_SIZE_INT(buffer));
SB_DCHECK(result);
speech_api_key = result ? buffer : "";
-#endif // SB_API_VERSION >= 2
#endif // defined(OS_STARBOARD)
up_url = AppendQueryParameter(up_url, "key", speech_api_key);
diff --git a/src/cobalt/speech/speech_configuration.h b/src/cobalt/speech/speech_configuration.h
index 8b9e0ba..c23ee5e 100644
--- a/src/cobalt/speech/speech_configuration.h
+++ b/src/cobalt/speech/speech_configuration.h
@@ -16,15 +16,14 @@
#define COBALT_SPEECH_SPEECH_CONFIGURATION_H_
#include "build/build_config.h"
-
-#if defined(OS_STARBOARD)
#include "starboard/configuration.h"
-#if SB_HAS(MICROPHONE) && SB_API_VERSION >= 2
+
+#if SB_HAS(MICROPHONE)
#define SB_USE_SB_MICROPHONE 1
-#endif // SB_HAS(MICROPHONE) && SB_VERSION(2)
+#endif // SB_HAS(MICROPHONE)
+
#if SB_HAS(SPEECH_RECOGNIZER) && SB_API_VERSION >= 5
#define SB_USE_SB_SPEECH_RECOGNIZER 1
#endif // SB_HAS(SPEECH_RECOGNIZER) && SB_API_VERSION >= 5
-#endif // defined(OS_STARBOARD)
#endif // COBALT_SPEECH_SPEECH_CONFIGURATION_H_
diff --git a/src/cobalt/speech/speech_synthesis.cc b/src/cobalt/speech/speech_synthesis.cc
index 5e3e1bb..fb34eaa 100644
--- a/src/cobalt/speech/speech_synthesis.cc
+++ b/src/cobalt/speech/speech_synthesis.cc
@@ -100,12 +100,6 @@
return;
}
-#if SB_API_VERSION < 4
- // DEPRECATED IN API VERSION 4
- std::string language =
- utterance->lang().empty() ? navigator_->language() : utterance->lang();
- SbSpeechSynthesisSetLanguage(language.c_str());
-#endif
SB_DLOG(INFO) << "Speaking: \"" << utterance->text() << "\" "
<< utterance->lang();
SbSpeechSynthesisSpeak(utterance->text().c_str());
diff --git a/src/cobalt/version.h b/src/cobalt/version.h
index e9daa78..88582da 100644
--- a/src/cobalt/version.h
+++ b/src/cobalt/version.h
@@ -15,6 +15,6 @@
#define COBALT_VERSION_H_
// Cobalt release number.
-#define COBALT_VERSION "12"
+#define COBALT_VERSION "13"
#endif // COBALT_VERSION_H_
diff --git a/src/cobalt/webdriver/server.cc b/src/cobalt/webdriver/server.cc
index 321c8c4..146b674 100644
--- a/src/cobalt/webdriver/server.cc
+++ b/src/cobalt/webdriver/server.cc
@@ -21,6 +21,8 @@
#include "base/json/json_reader.h"
#include "base/json/json_writer.h"
#include "base/string_util.h"
+#include "net/base/ip_endpoint.h"
+#include "net/base/net_errors.h"
#include "net/base/tcp_listen_socket.h"
#include "net/server/http_server_request_info.h"
@@ -177,11 +179,21 @@
WebDriverServer::WebDriverServer(int port, const std::string& listen_ip,
const HandleRequestCallback& callback)
- : handle_request_callback_(callback) {
+ : handle_request_callback_(callback),
+ server_address_("WebDriver.Server",
+ "Address to communicate with WebDriver.") {
// Create http server
factory_.reset(new net::TCPListenSocketFactory(listen_ip, port));
server_ = new net::HttpServer(*factory_, this);
- LOG(INFO) << "Starting WebDriver server on port " << port;
+ GURL address;
+ int result = GetLocalAddress(&address);
+ if (result == net::OK) {
+ LOG(INFO) << "Starting WebDriver server on port " << port;
+ server_address_ = address.spec();
+ } else {
+ LOG(WARNING) << "Could not start WebDriver server";
+ server_address_ = "<NOT RUNNING>";
+ }
}
void WebDriverServer::OnHttpRequest(int connection_id,
@@ -217,5 +229,14 @@
parameters.Pass(), response_handler.Pass());
}
+int WebDriverServer::GetLocalAddress(GURL* out) const {
+ net::IPEndPoint ip_addr;
+ int result = server_->GetLocalAddress(&ip_addr);
+ if (result == net::OK) {
+ *out = GURL("http://" + ip_addr.ToString());
+ }
+ return result;
+}
+
} // namespace webdriver
} // namespace cobalt
diff --git a/src/cobalt/webdriver/server.h b/src/cobalt/webdriver/server.h
index 71a66fa..03ba2b4 100644
--- a/src/cobalt/webdriver/server.h
+++ b/src/cobalt/webdriver/server.h
@@ -22,7 +22,9 @@
#include "base/memory/scoped_ptr.h"
#include "base/threading/thread.h"
#include "base/values.h"
+#include "cobalt/base/c_val.h"
#include "cobalt/webdriver/protocol/server_status.h"
+#include "googleurl/src/gurl.h"
#include "net/base/stream_listen_socket.h"
#include "net/server/http_server.h"
@@ -92,10 +94,13 @@
void OnClose(int) OVERRIDE {} // NOLINT(readability/casting)
private:
+ int GetLocalAddress(GURL* out) const;
+
base::ThreadChecker thread_checker_;
HandleRequestCallback handle_request_callback_;
scoped_ptr<net::StreamListenSocketFactory> factory_;
scoped_refptr<net::HttpServer> server_;
+ base::CVal<std::string> server_address_;
};
} // namespace webdriver
diff --git a/src/media/audio/android/audio_manager_android.cc b/src/media/audio/android/audio_manager_android.cc
deleted file mode 100644
index 0e4d6ba..0000000
--- a/src/media/audio/android/audio_manager_android.cc
+++ /dev/null
@@ -1,62 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "media/audio/android/audio_manager_android.h"
-
-#include "base/logging.h"
-#include "media/audio/android/opensles_input.h"
-#include "media/audio/android/opensles_output.h"
-#include "media/audio/audio_manager.h"
-#include "media/audio/fake_audio_input_stream.h"
-
-namespace media {
-
-// Maximum number of output streams that can be open simultaneously.
-static const int kMaxOutputStreams = 10;
-
-AudioManager* CreateAudioManager() {
- return new AudioManagerAndroid();
-}
-
-AudioManagerAndroid::AudioManagerAndroid() {
- SetMaxOutputStreamsAllowed(kMaxOutputStreams);
-}
-
-AudioManagerAndroid::~AudioManagerAndroid() {
- Shutdown();
-}
-
-bool AudioManagerAndroid::HasAudioOutputDevices() {
- return true;
-}
-
-bool AudioManagerAndroid::HasAudioInputDevices() {
- return false;
-}
-
-AudioOutputStream* AudioManagerAndroid::MakeLinearOutputStream(
- const AudioParameters& params) {
- DCHECK_EQ(AudioParameters::AUDIO_PCM_LINEAR, params.format());
- return new OpenSLESOutputStream(this, params);
-}
-
-AudioOutputStream* AudioManagerAndroid::MakeLowLatencyOutputStream(
- const AudioParameters& params) {
- DCHECK_EQ(AudioParameters::AUDIO_PCM_LOW_LATENCY, params.format());
- return new OpenSLESOutputStream(this, params);
-}
-
-AudioInputStream* AudioManagerAndroid::MakeLinearInputStream(
- const AudioParameters& params, const std::string& device_id) {
- DCHECK_EQ(AudioParameters::AUDIO_PCM_LINEAR, params.format());
- return new OpenSLESInputStream(this, params);
-}
-
-AudioInputStream* AudioManagerAndroid::MakeLowLatencyInputStream(
- const AudioParameters& params, const std::string& device_id) {
- DCHECK_EQ(AudioParameters::AUDIO_PCM_LOW_LATENCY, params.format());
- return new OpenSLESInputStream(this, params);
-}
-
-} // namespace media
diff --git a/src/media/audio/android/audio_manager_android.h b/src/media/audio/android/audio_manager_android.h
deleted file mode 100644
index 8f14808..0000000
--- a/src/media/audio/android/audio_manager_android.h
+++ /dev/null
@@ -1,40 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef MEDIA_AUDIO_ANDROID_AUDIO_MANAGER_ANDROID_H_
-#define MEDIA_AUDIO_ANDROID_AUDIO_MANAGER_ANDROID_H_
-
-#include "media/audio/audio_manager_base.h"
-
-namespace media {
-
-// Android implemention of AudioManager.
-class MEDIA_EXPORT AudioManagerAndroid : public AudioManagerBase {
- public:
- AudioManagerAndroid();
-
- // Implementation of AudioManager.
- virtual bool HasAudioOutputDevices() OVERRIDE;
- virtual bool HasAudioInputDevices() OVERRIDE;
-
- // Implementation of AudioManagerBase.
- virtual AudioOutputStream* MakeLinearOutputStream(
- const AudioParameters& params) OVERRIDE;
- virtual AudioOutputStream* MakeLowLatencyOutputStream(
- const AudioParameters& params) OVERRIDE;
- virtual AudioInputStream* MakeLinearInputStream(
- const AudioParameters& params, const std::string& device_id) OVERRIDE;
- virtual AudioInputStream* MakeLowLatencyInputStream(
- const AudioParameters& params, const std::string& device_id) OVERRIDE;
-
- protected:
- virtual ~AudioManagerAndroid();
-
- private:
- DISALLOW_COPY_AND_ASSIGN(AudioManagerAndroid);
-};
-
-} // namespace media
-
-#endif // MEDIA_AUDIO_ANDROID_AUDIO_MANAGER_ANDROID_H_
diff --git a/src/media/audio/android/opensles_input.cc b/src/media/audio/android/opensles_input.cc
deleted file mode 100644
index 0df5bc1..0000000
--- a/src/media/audio/android/opensles_input.cc
+++ /dev/null
@@ -1,301 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "media/audio/android/opensles_input.h"
-
-#include "base/logging.h"
-#include "media/audio/android/audio_manager_android.h"
-
-namespace media {
-
-OpenSLESInputStream::OpenSLESInputStream(AudioManagerAndroid* audio_manager,
- const AudioParameters& params)
- : audio_manager_(audio_manager),
- callback_(NULL),
- recorder_(NULL),
- simple_buffer_queue_(NULL),
- active_queue_(0),
- buffer_size_bytes_(0),
- started_(false) {
- format_.formatType = SL_DATAFORMAT_PCM;
- format_.numChannels = static_cast<SLuint32>(params.channels());
- // Provides sampling rate in milliHertz to OpenSLES.
- format_.samplesPerSec = static_cast<SLuint32>(params.sample_rate() * 1000);
- format_.bitsPerSample = params.bits_per_sample();
- format_.containerSize = params.bits_per_sample();
- format_.endianness = SL_BYTEORDER_LITTLEENDIAN;
- if (format_.numChannels == 1)
- format_.channelMask = SL_SPEAKER_FRONT_CENTER;
- else if (format_.numChannels == 2)
- format_.channelMask = SL_SPEAKER_FRONT_LEFT | SL_SPEAKER_FRONT_RIGHT;
- else
- NOTREACHED() << "Unsupported number of channels: " << format_.numChannels;
-
- buffer_size_bytes_ = params.GetBytesPerBuffer();
-
- memset(&audio_data_, 0, sizeof(audio_data_));
-}
-
-OpenSLESInputStream::~OpenSLESInputStream() {
- DCHECK(!recorder_object_.Get());
- DCHECK(!engine_object_.Get());
- DCHECK(!recorder_);
- DCHECK(!simple_buffer_queue_);
- DCHECK(!audio_data_[0]);
-}
-
-bool OpenSLESInputStream::Open() {
- if (engine_object_.Get())
- return false;
-
- if (!CreateRecorder())
- return false;
-
- SetupAudioBuffer();
-
- return true;
-}
-
-void OpenSLESInputStream::Start(AudioInputCallback* callback) {
- DCHECK(callback);
- DCHECK(recorder_);
- DCHECK(simple_buffer_queue_);
- if (started_)
- return;
-
- // Enable the flags before streaming.
- callback_ = callback;
- active_queue_ = 0;
- started_ = true;
-
- SLresult err = SL_RESULT_UNKNOWN_ERROR;
- // Enqueues |kNumOfQueuesInBuffer| zero buffers to get the ball rolling.
- for (int i = 0; i < kNumOfQueuesInBuffer - 1; ++i) {
- err = (*simple_buffer_queue_)->Enqueue(
- simple_buffer_queue_,
- audio_data_[i],
- buffer_size_bytes_);
- if (SL_RESULT_SUCCESS != err) {
- HandleError(err);
- return;
- }
- }
-
- // Start the recording by setting the state to |SL_RECORDSTATE_RECORDING|.
- err = (*recorder_)->SetRecordState(recorder_, SL_RECORDSTATE_RECORDING);
- DCHECK_EQ(SL_RESULT_SUCCESS, err);
- if (SL_RESULT_SUCCESS != err)
- HandleError(err);
-}
-
-void OpenSLESInputStream::Stop() {
- if (!started_)
- return;
-
- // Stop recording by setting the record state to |SL_RECORDSTATE_STOPPED|.
- SLresult err = (*recorder_)->SetRecordState(recorder_,
- SL_RECORDSTATE_STOPPED);
- if (SL_RESULT_SUCCESS != err) {
- DLOG(WARNING) << "SetRecordState() failed to set the state to stop";
- }
-
- // Clear the buffer queue to get rid of old data when resuming recording.
- err = (*simple_buffer_queue_)->Clear(simple_buffer_queue_);
- if (SL_RESULT_SUCCESS != err) {
- DLOG(WARNING) << "Clear() failed to clear the buffer queue";
- }
-
- started_ = false;
-}
-
-void OpenSLESInputStream::Close() {
- // Stop the stream if it is still recording.
- Stop();
-
- // Explicitly free the player objects and invalidate their associated
- // interfaces. They have to be done in the correct order.
- recorder_object_.Reset();
- engine_object_.Reset();
- simple_buffer_queue_ = NULL;
- recorder_ = NULL;
-
- ReleaseAudioBuffer();
-
- audio_manager_->ReleaseInputStream(this);
-}
-
-double OpenSLESInputStream::GetMaxVolume() {
- NOTIMPLEMENTED();
- return 0.0;
-}
-
-void OpenSLESInputStream::SetVolume(double volume) {
- NOTIMPLEMENTED();
-}
-
-double OpenSLESInputStream::GetVolume() {
- NOTIMPLEMENTED();
- return 0.0;
-}
-
-void OpenSLESInputStream::SetAutomaticGainControl(bool enabled) {
- NOTIMPLEMENTED();
-}
-
-bool OpenSLESInputStream::GetAutomaticGainControl() {
- NOTIMPLEMENTED();
- return false;
-}
-
-bool OpenSLESInputStream::CreateRecorder() {
- // Initializes the engine object with specific option. After working with the
- // object, we need to free the object and its resources.
- SLEngineOption option[] = {
- { SL_ENGINEOPTION_THREADSAFE, static_cast<SLuint32>(SL_BOOLEAN_TRUE) }
- };
- SLresult err = slCreateEngine(engine_object_.Receive(),
- 1,
- option,
- 0,
- NULL,
- NULL);
- DCHECK_EQ(SL_RESULT_SUCCESS, err);
- if (SL_RESULT_SUCCESS != err)
- return false;
-
- // Realize the SL engine object in synchronous mode.
- err = engine_object_->Realize(engine_object_.Get(), SL_BOOLEAN_FALSE);
- DCHECK_EQ(SL_RESULT_SUCCESS, err);
- if (SL_RESULT_SUCCESS != err)
- return false;
-
- // Get the SL engine interface which is implicit.
- SLEngineItf engine;
- err = engine_object_->GetInterface(
- engine_object_.Get(), SL_IID_ENGINE, &engine);
- DCHECK_EQ(SL_RESULT_SUCCESS, err);
- if (SL_RESULT_SUCCESS != err)
- return false;
-
- // Audio source configuration.
- SLDataLocator_IODevice mic_locator = {
- SL_DATALOCATOR_IODEVICE, SL_IODEVICE_AUDIOINPUT,
- SL_DEFAULTDEVICEID_AUDIOINPUT, NULL
- };
- SLDataSource audio_source = { &mic_locator, NULL };
-
- // Audio sink configuration.
- SLDataLocator_AndroidSimpleBufferQueue buffer_queue = {
- SL_DATALOCATOR_ANDROIDSIMPLEBUFFERQUEUE, // Locator type.
- static_cast<SLuint32>(kNumOfQueuesInBuffer) // Number of buffers.
- };
- SLDataSink audio_sink = { &buffer_queue, &format_ };
-
- // Create an audio recorder.
- const SLuint32 number_of_interfaces = 1;
- const SLInterfaceID interface_id[number_of_interfaces] = {
- SL_IID_ANDROIDSIMPLEBUFFERQUEUE
- };
- const SLboolean interface_required[number_of_interfaces] = {
- SL_BOOLEAN_TRUE
- };
- err = (*engine)->CreateAudioRecorder(engine,
- recorder_object_.Receive(),
- &audio_source,
- &audio_sink,
- number_of_interfaces,
- interface_id,
- interface_required);
- DCHECK_EQ(SL_RESULT_SUCCESS, err);
- if (SL_RESULT_SUCCESS != err) {
- DLOG(ERROR) << "CreateAudioRecorder failed with error code " << err;
- return false;
- }
-
- // Realize the recorder object in synchronous mode.
- err = recorder_object_->Realize(recorder_object_.Get(), SL_BOOLEAN_FALSE);
- DCHECK_EQ(SL_RESULT_SUCCESS, err);
- if (SL_RESULT_SUCCESS != err) {
- DLOG(ERROR) << "Recprder Realize() failed with error code " << err;
- return false;
- }
-
- // Get an implicit recorder interface.
- err = recorder_object_->GetInterface(recorder_object_.Get(),
- SL_IID_RECORD,
- &recorder_);
- DCHECK_EQ(SL_RESULT_SUCCESS, err);
- if (SL_RESULT_SUCCESS != err)
- return false;
-
- // Get the simple buffer queue interface.
- err = recorder_object_->GetInterface(recorder_object_.Get(),
- SL_IID_ANDROIDSIMPLEBUFFERQUEUE,
- &simple_buffer_queue_);
- DCHECK_EQ(SL_RESULT_SUCCESS, err);
- if (SL_RESULT_SUCCESS != err)
- return false;
-
- // Register the input callback for the simple buffer queue.
- // This callback will be called when receiving new data from the device.
- err = (*simple_buffer_queue_)->RegisterCallback(simple_buffer_queue_,
- SimpleBufferQueueCallback,
- this);
- DCHECK_EQ(SL_RESULT_SUCCESS, err);
-
- return (SL_RESULT_SUCCESS == err);
-}
-
-void OpenSLESInputStream::SimpleBufferQueueCallback(
- SLAndroidSimpleBufferQueueItf buffer_queue, void* instance) {
- OpenSLESInputStream* stream =
- reinterpret_cast<OpenSLESInputStream*>(instance);
- stream->ReadBufferQueue();
-}
-
-void OpenSLESInputStream::ReadBufferQueue() {
- if (!started_)
- return;
-
- // Get the enqueued buffer from the soundcard.
- SLresult err = (*simple_buffer_queue_)->Enqueue(
- simple_buffer_queue_,
- audio_data_[active_queue_],
- buffer_size_bytes_);
- if (SL_RESULT_SUCCESS != err)
- HandleError(err);
-
- // TODO(xians): Get an accurate delay estimation.
- callback_->OnData(this,
- audio_data_[active_queue_],
- buffer_size_bytes_,
- buffer_size_bytes_,
- 0.0);
-
- active_queue_ = (active_queue_ + 1) % kNumOfQueuesInBuffer;
-}
-
-void OpenSLESInputStream::SetupAudioBuffer() {
- DCHECK(!audio_data_[0]);
- for (int i = 0; i < kNumOfQueuesInBuffer; ++i) {
- audio_data_[i] = new uint8[buffer_size_bytes_];
- }
-}
-
-void OpenSLESInputStream::ReleaseAudioBuffer() {
- if (audio_data_[0]) {
- for (int i = 0; i < kNumOfQueuesInBuffer; ++i) {
- delete [] audio_data_[i];
- audio_data_[i] = NULL;
- }
- }
-}
-
-void OpenSLESInputStream::HandleError(SLresult error) {
- DLOG(FATAL) << "OpenSLES error " << error;
- if (callback_)
- callback_->OnError(this, error);
-}
-
-} // namespace media
diff --git a/src/media/audio/android/opensles_input.h b/src/media/audio/android/opensles_input.h
deleted file mode 100644
index 0d18f9b..0000000
--- a/src/media/audio/android/opensles_input.h
+++ /dev/null
@@ -1,86 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef MEDIA_AUDIO_ANDROID_OPENSLES_INPUT_H_
-#define MEDIA_AUDIO_ANDROID_OPENSLES_INPUT_H_
-
-#include "base/compiler_specific.h"
-#include "media/audio/audio_io.h"
-#include "media/audio/audio_parameters.h"
-#include "media/audio/android/opensles_util.h"
-#include <SLES/OpenSLES_Android.h>
-
-namespace media {
-
-class AudioManagerAndroid;
-
-// Implements PCM audio input support for Android using the OpenSLES API.
-class OpenSLESInputStream : public AudioInputStream {
- public:
- static const int kNumOfQueuesInBuffer = 2;
-
- OpenSLESInputStream(AudioManagerAndroid* manager,
- const AudioParameters& params);
-
- virtual ~OpenSLESInputStream();
-
- // Implementation of AudioInputStream.
- virtual bool Open() OVERRIDE;
- virtual void Start(AudioInputCallback* callback) OVERRIDE;
- virtual void Stop() OVERRIDE;
- virtual void Close() OVERRIDE;
- virtual double GetMaxVolume() OVERRIDE;
- virtual void SetVolume(double volume) OVERRIDE;
- virtual double GetVolume() OVERRIDE;
- virtual void SetAutomaticGainControl(bool enabled) OVERRIDE;
- virtual bool GetAutomaticGainControl() OVERRIDE;
-
- private:
- bool CreateRecorder();
-
- static void SimpleBufferQueueCallback(
- SLAndroidSimpleBufferQueueItf buffer_queue, void* instance);
-
- void ReadBufferQueue();
-
- // Called in Open();
- void SetupAudioBuffer();
-
- // Called in Close();
- void ReleaseAudioBuffer();
-
- // If OpenSLES reports an error this function handles it and passes it to
- // the attached AudioInputCallback::OnError().
- void HandleError(SLresult error);
-
- AudioManagerAndroid* audio_manager_;
-
- AudioInputCallback* callback_;
-
- // Shared engine interfaces for the app.
- media::ScopedSLObjectItf recorder_object_;
- media::ScopedSLObjectItf engine_object_;
-
- SLRecordItf recorder_;
-
- // Buffer queue recorder interface.
- SLAndroidSimpleBufferQueueItf simple_buffer_queue_;
-
- SLDataFormat_PCM format_;
-
- // Audio buffers that are allocated in the constructor based on
- // info from audio parameters.
- uint8* audio_data_[kNumOfQueuesInBuffer];
-
- int active_queue_;
- int buffer_size_bytes_;
-
- bool started_;
-
- DISALLOW_COPY_AND_ASSIGN(OpenSLESInputStream);
-};
-
-} // namespace media
-
-#endif // MEDIA_AUDIO_ANDROID_OPENSLES_INPUT_H_
diff --git a/src/media/audio/android/opensles_output.cc b/src/media/audio/android/opensles_output.cc
deleted file mode 100644
index 26ae25b..0000000
--- a/src/media/audio/android/opensles_output.cc
+++ /dev/null
@@ -1,311 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "media/audio/android/opensles_output.h"
-
-#include "base/logging.h"
-#include "media/audio/audio_util.h"
-#include "media/audio/android/audio_manager_android.h"
-
-namespace media {
-
-OpenSLESOutputStream::OpenSLESOutputStream(AudioManagerAndroid* manager,
- const AudioParameters& params)
- : audio_manager_(manager),
- callback_(NULL),
- player_(NULL),
- simple_buffer_queue_(NULL),
- active_queue_(0),
- buffer_size_bytes_(0),
- started_(false),
- volume_(1.0) {
- format_.formatType = SL_DATAFORMAT_PCM;
- format_.numChannels = static_cast<SLuint32>(params.channels());
- // Provides sampling rate in milliHertz to OpenSLES.
- format_.samplesPerSec = static_cast<SLuint32>(params.sample_rate() * 1000);
- format_.bitsPerSample = params.bits_per_sample();
- format_.containerSize = params.bits_per_sample();
- format_.endianness = SL_BYTEORDER_LITTLEENDIAN;
- if (format_.numChannels == 1)
- format_.channelMask = SL_SPEAKER_FRONT_CENTER;
- else if (format_.numChannels == 2)
- format_.channelMask = SL_SPEAKER_FRONT_LEFT | SL_SPEAKER_FRONT_RIGHT;
- else
- NOTREACHED() << "Unsupported number of channels: " << format_.numChannels;
-
- buffer_size_bytes_ = params.GetBytesPerBuffer();
- audio_bus_ = AudioBus::Create(params);
-
- memset(&audio_data_, 0, sizeof(audio_data_));
-}
-
-OpenSLESOutputStream::~OpenSLESOutputStream() {
- DCHECK(!engine_object_.Get());
- DCHECK(!player_object_.Get());
- DCHECK(!output_mixer_.Get());
- DCHECK(!player_);
- DCHECK(!simple_buffer_queue_);
- DCHECK(!audio_data_[0]);
-}
-
-bool OpenSLESOutputStream::Open() {
- if (engine_object_.Get())
- return false;
-
- if (!CreatePlayer())
- return false;
-
- SetupAudioBuffer();
-
- return true;
-}
-
-void OpenSLESOutputStream::Start(AudioSourceCallback* callback) {
- DCHECK(callback);
- DCHECK(player_);
- DCHECK(simple_buffer_queue_);
- if (started_)
- return;
-
- // Enable the flags before streaming.
- callback_ = callback;
- active_queue_ = 0;
- started_ = true;
-
- // Avoid start-up glitches by filling up one buffer queue before starting
- // the stream.
- FillBufferQueue();
-
- // Start streaming data by setting the play state to |SL_PLAYSTATE_PLAYING|.
- SLresult err = (*player_)->SetPlayState(player_, SL_PLAYSTATE_PLAYING);
- DCHECK_EQ(SL_RESULT_SUCCESS, err);
- if (SL_RESULT_SUCCESS != err) {
- DLOG(WARNING) << "SetPlayState() failed to start playing";
- }
-}
-
-void OpenSLESOutputStream::Stop() {
- if (!started_)
- return;
-
- started_ = false;
- // Stop playing by setting the play state to |SL_PLAYSTATE_STOPPED|.
- SLresult err = (*player_)->SetPlayState(player_, SL_PLAYSTATE_STOPPED);
- if (SL_RESULT_SUCCESS != err) {
- DLOG(WARNING) << "SetPlayState() failed to set the state to stop";
- }
-
- // Clear the buffer queue so that the old data won't be played when
- // resuming playing.
- err = (*simple_buffer_queue_)->Clear(simple_buffer_queue_);
- if (SL_RESULT_SUCCESS != err) {
- DLOG(WARNING) << "Clear() failed to clear the buffer queue";
- }
-}
-
-void OpenSLESOutputStream::Close() {
- // Stop the stream if it is still playing.
- Stop();
-
- // Explicitly free the player objects and invalidate their associated
- // interfaces. They have to be done in the correct order.
- output_mixer_.Reset();
- player_object_.Reset();
- engine_object_.Reset();
- simple_buffer_queue_ = NULL;
- player_ = NULL;
-
- ReleaseAudioBuffer();
-
- audio_manager_->ReleaseOutputStream(this);
-}
-
-void OpenSLESOutputStream::SetVolume(double volume) {
- float volume_float = static_cast<float>(volume);
- if (volume_float < 0.0f || volume_float > 1.0f) {
- return;
- }
- volume_ = volume_float;
-}
-
-void OpenSLESOutputStream::GetVolume(double* volume) {
- *volume = static_cast<double>(volume_);
-}
-
-bool OpenSLESOutputStream::CreatePlayer() {
- // Initializes the engine object with specific option. After working with the
- // object, we need to free the object and its resources.
- SLEngineOption option[] = {
- { SL_ENGINEOPTION_THREADSAFE, static_cast<SLuint32>(SL_BOOLEAN_TRUE) }
- };
- SLresult err = slCreateEngine(engine_object_.Receive(), 1, option, 0,
- NULL, NULL);
- DCHECK_EQ(SL_RESULT_SUCCESS, err);
- if (SL_RESULT_SUCCESS != err)
- return false;
-
- // Realize the SL engine object in synchronous mode.
- err = engine_object_->Realize(engine_object_.Get(), SL_BOOLEAN_FALSE);
- DCHECK_EQ(SL_RESULT_SUCCESS, err);
- if (SL_RESULT_SUCCESS != err)
- return false;
-
- // Get the SL engine interface which is implicit.
- SLEngineItf engine;
- err = engine_object_->GetInterface(engine_object_.Get(),
- SL_IID_ENGINE,
- &engine);
- DCHECK_EQ(SL_RESULT_SUCCESS, err);
- if (SL_RESULT_SUCCESS != err)
- return false;
-
- // Create ouput mixer object to be used by the player.
- // TODO(xians): Do we need the environmental reverb auxiliary effect?
- err = (*engine)->CreateOutputMix(engine,
- output_mixer_.Receive(),
- 0,
- NULL,
- NULL);
- DCHECK_EQ(SL_RESULT_SUCCESS, err);
- if (SL_RESULT_SUCCESS != err)
- return false;
-
- // Realizing the output mix object in synchronous mode.
- err = output_mixer_->Realize(output_mixer_.Get(), SL_BOOLEAN_FALSE);
- DCHECK_EQ(SL_RESULT_SUCCESS, err);
- if (SL_RESULT_SUCCESS != err)
- return false;
-
- // Audio source configuration.
- SLDataLocator_AndroidSimpleBufferQueue simple_buffer_queue = {
- SL_DATALOCATOR_ANDROIDSIMPLEBUFFERQUEUE,
- static_cast<SLuint32>(kNumOfQueuesInBuffer)
- };
- SLDataSource audio_source = { &simple_buffer_queue, &format_ };
-
- // Audio sink configuration.
- SLDataLocator_OutputMix locator_output_mix = {
- SL_DATALOCATOR_OUTPUTMIX, output_mixer_.Get()
- };
- SLDataSink audio_sink = { &locator_output_mix, NULL };
-
- // Create an audio player.
- const SLuint32 number_of_interfaces = 1;
- const SLInterfaceID interface_id[number_of_interfaces] = {
- SL_IID_BUFFERQUEUE
- };
- const SLboolean interface_required[number_of_interfaces] = {
- SL_BOOLEAN_TRUE
- };
- err = (*engine)->CreateAudioPlayer(engine,
- player_object_.Receive(),
- &audio_source,
- &audio_sink,
- number_of_interfaces,
- interface_id,
- interface_required);
- DCHECK_EQ(SL_RESULT_SUCCESS, err);
- if (SL_RESULT_SUCCESS != err) {
- DLOG(ERROR) << "CreateAudioPlayer() failed with error code " << err;
- return false;
- }
-
- // Realize the player object in synchronous mode.
- err = player_object_->Realize(player_object_.Get(), SL_BOOLEAN_FALSE);
- DCHECK_EQ(SL_RESULT_SUCCESS, err);
- if (SL_RESULT_SUCCESS != err) {
- DLOG(ERROR) << "Player Realize() failed with error code " << err;
- return false;
- }
-
- // Get an implicit player interface.
- err = player_object_->GetInterface(
- player_object_.Get(), SL_IID_PLAY, &player_);
- DCHECK_EQ(SL_RESULT_SUCCESS, err);
- if (SL_RESULT_SUCCESS != err)
- return false;
-
- // Get the simple buffer queue interface.
- err = player_object_->GetInterface(player_object_.Get(),
- SL_IID_BUFFERQUEUE,
- &simple_buffer_queue_);
- DCHECK_EQ(SL_RESULT_SUCCESS, err);
- if (SL_RESULT_SUCCESS != err)
- return false;
-
- // Register the input callback for the simple buffer queue.
- // This callback will be called when the soundcard needs data.
- err = (*simple_buffer_queue_)->RegisterCallback(simple_buffer_queue_,
- SimpleBufferQueueCallback,
- this);
- DCHECK_EQ(SL_RESULT_SUCCESS, err);
-
- return (SL_RESULT_SUCCESS == err);
-}
-
-void OpenSLESOutputStream::SimpleBufferQueueCallback(
- SLAndroidSimpleBufferQueueItf buffer_queue, void* instance) {
- OpenSLESOutputStream* stream =
- reinterpret_cast<OpenSLESOutputStream*>(instance);
- stream->FillBufferQueue();
-}
-
-void OpenSLESOutputStream::FillBufferQueue() {
- if (!started_)
- return;
-
- // Read data from the registered client source.
- // TODO(xians): Get an accurate delay estimation.
- uint32 hardware_delay = buffer_size_bytes_;
- int frames_filled = callback_->OnMoreData(
- audio_bus_.get(), AudioBuffersState(0, hardware_delay));
- int num_filled_bytes =
- frames_filled * audio_bus_->channels() * format_.bitsPerSample / 8;
- DCHECK_LE(static_cast<size_t>(num_filled_bytes), buffer_size_bytes_);
- // Note: If this ever changes to output raw float, the data must be clipped
- // and sanitized since it may come from an untrusted source such as NaCl.
- audio_bus_->ToInterleaved(
- frames_filled, format_.bitsPerSample / 8, audio_data_[active_queue_]);
-
- // Perform in-place, software-volume adjustments.
- media::AdjustVolume(audio_data_[active_queue_],
- num_filled_bytes,
- format_.numChannels,
- format_.bitsPerSample / 8,
- volume_);
-
- // Enqueue the buffer for playback.
- SLresult err = (*simple_buffer_queue_)->Enqueue(
- simple_buffer_queue_,
- audio_data_[active_queue_],
- num_filled_bytes);
- if (SL_RESULT_SUCCESS != err)
- HandleError(err);
-
- active_queue_ = (active_queue_ + 1) % kNumOfQueuesInBuffer;
-}
-
-void OpenSLESOutputStream::SetupAudioBuffer() {
- DCHECK(!audio_data_[0]);
- for (int i = 0; i < kNumOfQueuesInBuffer; ++i) {
- audio_data_[i] = new uint8[buffer_size_bytes_];
- }
-}
-
-void OpenSLESOutputStream::ReleaseAudioBuffer() {
- if (audio_data_[0]) {
- for (int i = 0; i < kNumOfQueuesInBuffer; ++i) {
- delete [] audio_data_[i];
- audio_data_[i] = NULL;
- }
- }
-}
-
-void OpenSLESOutputStream::HandleError(SLresult error) {
- DLOG(FATAL) << "OpenSLES error " << error;
- if (callback_)
- callback_->OnError(this, error);
-}
-
-} // namespace media
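FillBufferQueue() above combines three steps: pull PCM data from the client callback, apply software volume in place, and enqueue the result while rotating through a small set of preallocated buffers. The following standalone sketch (plain C++, not the Chromium code; the names RotatingBuffers, ScaleVolume, and the buffer size are illustrative) shows only the buffer rotation and the 16-bit volume scaling, roughly the effect media::AdjustVolume() has for this format.

#include <cstdint>
#include <vector>

namespace example {

constexpr int kNumOfQueuesInBuffer = 2;    // Same double-buffering as above.
constexpr size_t kBufferSizeBytes = 4096;  // Illustrative packet size.

// Owns the preallocated queue buffers and mirrors the |active_queue_|
// rotation performed at the end of FillBufferQueue().
class RotatingBuffers {
 public:
  RotatingBuffers() {
    for (auto& buffer : buffers_)
      buffer.resize(kBufferSizeBytes);
  }

  // Returns the buffer to fill for the current callback and advances the
  // index so the next callback uses the other slot.
  uint8_t* NextBuffer() {
    uint8_t* data = buffers_[active_].data();
    active_ = (active_ + 1) % kNumOfQueuesInBuffer;
    return data;
  }

 private:
  std::vector<uint8_t> buffers_[kNumOfQueuesInBuffer];
  int active_ = 0;
};

// In-place software volume for interleaved signed 16-bit samples; |volume|
// is expected to be in [0.0, 1.0], as enforced by SetVolume() above.
void ScaleVolume(int16_t* samples, size_t num_samples, float volume) {
  for (size_t i = 0; i < num_samples; ++i)
    samples[i] = static_cast<int16_t>(samples[i] * volume);
}

}  // namespace example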
diff --git a/src/media/audio/android/opensles_output.h b/src/media/audio/android/opensles_output.h
deleted file mode 100644
index 9ecfb6c..0000000
--- a/src/media/audio/android/opensles_output.h
+++ /dev/null
@@ -1,91 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef MEDIA_AUDIO_ANDROID_OPENSLES_OUTPUT_H_
-#define MEDIA_AUDIO_ANDROID_OPENSLES_OUTPUT_H_
-
-#include <vector>
-
-#include "base/compiler_specific.h"
-#include "media/audio/android/opensles_util.h"
-#include "media/audio/audio_io.h"
-#include "media/audio/audio_parameters.h"
-#include <SLES/OpenSLES_Android.h>
-
-namespace media {
-
-class AudioManagerAndroid;
-
-// Implements PCM audio output support for Android using the OpenSLES API.
-class OpenSLESOutputStream : public AudioOutputStream {
- public:
- static const int kNumOfQueuesInBuffer = 2;
-
- OpenSLESOutputStream(AudioManagerAndroid* manager,
- const AudioParameters& params);
-
- virtual ~OpenSLESOutputStream();
-
- // Implementation of AudioOutputStream.
- virtual bool Open() OVERRIDE;
- virtual void Close() OVERRIDE;
- virtual void Start(AudioSourceCallback* callback) OVERRIDE;
- virtual void Stop() OVERRIDE;
- virtual void SetVolume(double volume) OVERRIDE;
- virtual void GetVolume(double* volume) OVERRIDE;
-
- private:
- bool CreatePlayer();
-
- static void SimpleBufferQueueCallback(
- SLAndroidSimpleBufferQueueItf buffer_queue, void* instance);
-
- void FillBufferQueue();
-
- // Called in Open().
- void SetupAudioBuffer();
-
- // Called in Close().
- void ReleaseAudioBuffer();
-
- // If OpenSLES reports an error this function handles it and passes it to
- // the attached AudioOutputCallback::OnError().
- void HandleError(SLresult error);
-
- AudioManagerAndroid* audio_manager_;
-
- AudioSourceCallback* callback_;
-
- // Shared engine interfaces for the app.
- media::ScopedSLObjectItf engine_object_;
- media::ScopedSLObjectItf player_object_;
- media::ScopedSLObjectItf output_mixer_;
-
- SLPlayItf player_;
-
- // Buffer queue player interface.
- SLAndroidSimpleBufferQueueItf simple_buffer_queue_;
-
- SLDataFormat_PCM format_;
-
- // Audio buffer arrays that are allocated in SetupAudioBuffer().
- uint8* audio_data_[kNumOfQueuesInBuffer];
-
- int active_queue_;
- size_t buffer_size_bytes_;
-
- bool started_;
-
- // Volume level from 0 to 1.
- float volume_;
-
- // Container for retrieving data from AudioSourceCallback::OnMoreData().
- scoped_ptr<AudioBus> audio_bus_;
-
- DISALLOW_COPY_AND_ASSIGN(OpenSLESOutputStream);
-};
-
-} // namespace media
-
-#endif // MEDIA_AUDIO_ANDROID_OPENSLES_OUTPUT_H_
diff --git a/src/media/audio/android/opensles_util.h b/src/media/audio/android/opensles_util.h
deleted file mode 100644
index 4a028e2..0000000
--- a/src/media/audio/android/opensles_util.h
+++ /dev/null
@@ -1,44 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef MEDIA_AUDIO_ANDROID_OPENSLES_UTIL_H_
-#define MEDIA_AUDIO_ANDROID_OPENSLES_UTIL_H_
-
-#include "base/logging.h"
-#include <SLES/OpenSLES.h>
-
-namespace media {
-
-template <typename SLType, typename SLDerefType>
-class ScopedSLObject {
- public:
- ScopedSLObject() : obj_(NULL) {}
-
- ~ScopedSLObject() { Reset(); }
-
- SLType* Receive() {
- DCHECK(!obj_);
- return &obj_;
- }
-
- SLDerefType operator->() { return *obj_; }
-
- SLType Get() const { return obj_; }
-
- void Reset() {
- if (obj_) {
- (*obj_)->Destroy(obj_);
- obj_ = NULL;
- }
- }
-
- private:
- SLType obj_;
-};
-
-typedef ScopedSLObject<SLObjectItf, const SLObjectItf_*> ScopedSLObjectItf;
-
-} // namespace media
-
-#endif // MEDIA_AUDIO_ANDROID_OPENSLES_UTIL_H_
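A minimal sketch, assuming the Android NDK OpenSL ES headers, of how ScopedSLObjectItf is typically used: the raw object is handed out through Receive(), realized, queried for an interface, and destroyed automatically when the wrapper goes out of scope. The function name CreateEngineExample is illustrative; the calls themselves mirror the engine setup in OpenSLESOutputStream::CreatePlayer() above and do not form a complete audio pipeline.

#include <SLES/OpenSLES.h>

#include "media/audio/android/opensles_util.h"

bool CreateEngineExample() {
  media::ScopedSLObjectItf engine_object;

  // slCreateEngine() fills in the object through the Receive() out-pointer.
  SLresult err =
      slCreateEngine(engine_object.Receive(), 0, NULL, 0, NULL, NULL);
  if (err != SL_RESULT_SUCCESS)
    return false;

  // Realize synchronously before any interface can be fetched.
  err = engine_object->Realize(engine_object.Get(), SL_BOOLEAN_FALSE);
  if (err != SL_RESULT_SUCCESS)
    return false;

  // Fetch the implicit engine interface, as CreatePlayer() does.
  SLEngineItf engine;
  err = engine_object->GetInterface(engine_object.Get(), SL_IID_ENGINE,
                                    &engine);
  return err == SL_RESULT_SUCCESS;
}  // |engine_object| is destroyed here by ScopedSLObject::Reset().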
diff --git a/src/media/audio/async_socket_io_handler.h b/src/media/audio/async_socket_io_handler.h
deleted file mode 100644
index d17e3d3..0000000
--- a/src/media/audio/async_socket_io_handler.h
+++ /dev/null
@@ -1,113 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef MEDIA_AUDIO_ASYNC_SOCKET_IO_HANDLER_H_
-#define MEDIA_AUDIO_ASYNC_SOCKET_IO_HANDLER_H_
-
-#include "base/message_loop.h"
-#include "base/sync_socket.h"
-#include "base/threading/non_thread_safe.h"
-#include "media/base/media_export.h"
-
-namespace media {
-
-// The message loop callback interface differs between platforms.
-#if defined(OS_WIN)
-typedef MessageLoopForIO::IOHandler MessageLoopIOHandler;
-#elif defined(OS_POSIX)
-typedef MessageLoopForIO::Watcher MessageLoopIOHandler;
-#endif
-
-// Extends the CancelableSyncSocket class to allow reading from a socket
-// asynchronously on a TYPE_IO message loop thread. This makes it easy to share
-// a thread that uses a message loop (e.g. for IPC and other things) and not
-// require a separate thread to read from the socket.
-//
-// Example usage (also see the unit tests):
-//
-// class SocketReader {
-// public:
-// SocketReader(base::CancelableSyncSocket* socket)
-// : socket_(socket), buffer_() {
-// io_handler.Initialize(socket_->handle(),
-// base::Bind(&SocketReader::OnDataAvailable,
- // base::Unretained(this)));
-// }
-//
-// void AsyncRead() {
-// CHECK(io_handler.Read(&buffer_[0], sizeof(buffer_)));
-// }
-//
-// private:
-// void OnDataAvailable(int bytes_read) {
-// if (ProcessData(&buffer_[0], bytes_read)) {
-// // Issue another read.
-// CHECK(io_handler.Read(&buffer_[0], sizeof(buffer_)));
-// }
-// }
-//
-// media::AsyncSocketIoHandler io_handler;
-// base::CancelableSyncSocket* socket_;
-// char buffer_[kBufferSize];
-// };
-//
-class MEDIA_EXPORT AsyncSocketIoHandler
- : public NON_EXPORTED_BASE(base::NonThreadSafe),
- public NON_EXPORTED_BASE(MessageLoopIOHandler) {
- public:
- AsyncSocketIoHandler();
- virtual ~AsyncSocketIoHandler();
-
- // Type definition for the callback. The parameter tells how many
- // bytes were read and is 0 if an error occurred.
- typedef base::Callback<void(int)> ReadCompleteCallback;
-
- // Initializes the AsyncSocketIoHandler by hooking it up to the current
- // thread's message loop (must be TYPE_IO), to do async reads from the socket
- // on the current thread. The |callback| will be invoked whenever a Read()
- // has completed.
- bool Initialize(base::SyncSocket::Handle socket,
- const ReadCompleteCallback& callback);
-
- // Attempts to read from the socket. The return value will be |false|
- // if an error occurred and |true| if data was read or a pending read
- // was issued. Regardless of async or sync operation, the
- // ReadCompleteCallback (see above) will be called when data is available.
- bool Read(char* buffer, int buffer_len);
-
- private:
-#if defined(OS_WIN)
- // Implementation of IOHandler on Windows.
- virtual void OnIOCompleted(MessageLoopForIO::IOContext* context,
- DWORD bytes_transfered,
- DWORD error) OVERRIDE;
-#elif defined(OS_POSIX)
- // Implementation of MessageLoopForIO::Watcher.
- virtual void OnFileCanWriteWithoutBlocking(int socket) OVERRIDE {}
- virtual void OnFileCanReadWithoutBlocking(int socket) OVERRIDE;
-
- void EnsureWatchingSocket();
-#endif
-
- base::SyncSocket::Handle socket_;
-#if defined(OS_WIN)
- MessageLoopForIO::IOContext* context_;
- bool is_pending_;
-#elif defined(OS_POSIX)
- MessageLoopForIO::FileDescriptorWatcher socket_watcher_;
- // |pending_buffer_| and |pending_buffer_len_| are valid only between
- // Read() and OnFileCanReadWithoutBlocking().
- char* pending_buffer_;
- int pending_buffer_len_;
- // |true| iff the message loop is watching the socket for IO events.
- bool is_watching_;
-#endif
- ReadCompleteCallback read_complete_;
-
- DISALLOW_COPY_AND_ASSIGN(AsyncSocketIoHandler);
-};
-
-} // namespace media.
-
-#endif // MEDIA_AUDIO_ASYNC_SOCKET_IO_HANDLER_H_
diff --git a/src/media/audio/async_socket_io_handler_posix.cc b/src/media/audio/async_socket_io_handler_posix.cc
deleted file mode 100644
index eeec7c1..0000000
--- a/src/media/audio/async_socket_io_handler_posix.cc
+++ /dev/null
@@ -1,97 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "media/audio/async_socket_io_handler.h"
-
-#include <fcntl.h>
-
-#include "base/posix/eintr_wrapper.h"
-
-namespace media {
-
-AsyncSocketIoHandler::AsyncSocketIoHandler()
- : socket_(base::SyncSocket::kInvalidHandle),
- pending_buffer_(NULL),
- pending_buffer_len_(0),
- is_watching_(false) {
-}
-
-AsyncSocketIoHandler::~AsyncSocketIoHandler() {
- DCHECK(CalledOnValidThread());
-}
-
-void AsyncSocketIoHandler::OnFileCanReadWithoutBlocking(int socket) {
- DCHECK(CalledOnValidThread());
- DCHECK_EQ(socket, socket_);
- DCHECK(!read_complete_.is_null());
-
- if (pending_buffer_) {
- int bytes_read = HANDLE_EINTR(read(socket_, pending_buffer_,
- pending_buffer_len_));
- DCHECK_GT(bytes_read, 0);
- pending_buffer_ = NULL;
- pending_buffer_len_ = 0;
- read_complete_.Run(bytes_read > 0 ? bytes_read : 0);
- } else {
- // We're getting notifications that we can read from the socket while
- // we're not waiting for data. In order to not starve the message loop,
- // let's stop watching the fd and restart the watch when Read() is called.
- is_watching_ = false;
- socket_watcher_.StopWatchingFileDescriptor();
- }
-}
-
-bool AsyncSocketIoHandler::Read(char* buffer, int buffer_len) {
- DCHECK(CalledOnValidThread());
- DCHECK(!read_complete_.is_null());
- DCHECK(!pending_buffer_);
-
- EnsureWatchingSocket();
-
- int bytes_read = HANDLE_EINTR(read(socket_, buffer, buffer_len));
- if (bytes_read < 0) {
- if (errno == EAGAIN) {
- pending_buffer_ = buffer;
- pending_buffer_len_ = buffer_len;
- } else {
- NOTREACHED() << "read(): " << errno;
- return false;
- }
- } else {
- read_complete_.Run(bytes_read);
- }
- return true;
-}
-
-bool AsyncSocketIoHandler::Initialize(base::SyncSocket::Handle socket,
- const ReadCompleteCallback& callback) {
- DCHECK_EQ(socket_, base::SyncSocket::kInvalidHandle);
-
- DetachFromThread();
-
- socket_ = socket;
- read_complete_ = callback;
-
- // SyncSocket is blocking by default, so let's convert it to non-blocking.
- int value = fcntl(socket, F_GETFL);
- if (!(value & O_NONBLOCK)) {
- // Set the socket to be non-blocking so we can do async reads.
- if (fcntl(socket, F_SETFL, O_NONBLOCK) == -1) {
- NOTREACHED();
- return false;
- }
- }
-
- return true;
-}
-
-void AsyncSocketIoHandler::EnsureWatchingSocket() {
- DCHECK(CalledOnValidThread());
- if (!is_watching_ && socket_ != base::SyncSocket::kInvalidHandle) {
- is_watching_ = MessageLoopForIO::current()->WatchFileDescriptor(
- socket_, true, MessageLoopForIO::WATCH_READ, &socket_watcher_, this);
- }
-}
-
-} // namespace media.
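Initialize() above converts the sync socket to non-blocking so that Read() can return immediately with EAGAIN and complete later from OnFileCanReadWithoutBlocking(). The standalone POSIX sketch below (SetNonBlocking is an illustrative name, not part of the code above) shows that conversion; unlike the code above it preserves any flags that were already set, which is the more defensive form of the same fcntl() call.

#include <fcntl.h>

// Puts |fd| into non-blocking mode, keeping any existing descriptor flags.
bool SetNonBlocking(int fd) {
  int flags = fcntl(fd, F_GETFL);
  if (flags == -1)
    return false;
  if (flags & O_NONBLOCK)
    return true;  // Already non-blocking; nothing to do.
  return fcntl(fd, F_SETFL, flags | O_NONBLOCK) != -1;
}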
diff --git a/src/media/audio/async_socket_io_handler_unittest.cc b/src/media/audio/async_socket_io_handler_unittest.cc
deleted file mode 100644
index c7fa47b..0000000
--- a/src/media/audio/async_socket_io_handler_unittest.cc
+++ /dev/null
@@ -1,142 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "media/audio/async_socket_io_handler.h"
-
-#include "base/bind.h"
-#include "testing/gtest/include/gtest/gtest.h"
-
-namespace {
-const char kAsyncSocketIoTestString[] = "Hello, AsyncSocketIoHandler";
-const size_t kAsyncSocketIoTestStringLength =
- arraysize(kAsyncSocketIoTestString);
-
-class TestSocketReader {
- public:
- // Set |number_of_reads_before_quit| to >0 when you expect a specific number
- // of Read operations to complete. Once that number is reached, the current
- // message loop will be Quit(). Set |number_of_reads_before_quit| to -1 if
- // callbacks should not be counted.
- TestSocketReader(base::CancelableSyncSocket* socket,
- int number_of_reads_before_quit,
- bool issue_reads_from_callback)
- : socket_(socket), buffer_(),
- number_of_reads_before_quit_(number_of_reads_before_quit),
- callbacks_received_(0),
- issue_reads_from_callback_(issue_reads_from_callback) {
- io_handler.Initialize(socket_->handle(),
- base::Bind(&TestSocketReader::OnRead,
- base::Unretained(this)));
- }
- ~TestSocketReader() {}
-
- bool IssueRead() {
- return io_handler.Read(&buffer_[0], sizeof(buffer_));
- }
-
- const char* buffer() const { return &buffer_[0]; }
-
- int callbacks_received() const { return callbacks_received_; }
-
- private:
- void OnRead(int bytes_read) {
- EXPECT_GT(bytes_read, 0);
- ++callbacks_received_;
- if (number_of_reads_before_quit_ == callbacks_received_) {
- MessageLoop::current()->Quit();
- } else if (issue_reads_from_callback_) {
- IssueRead();
- }
- }
-
- media::AsyncSocketIoHandler io_handler;
- base::CancelableSyncSocket* socket_; // Ownership lies outside the class.
- char buffer_[kAsyncSocketIoTestStringLength];
- int number_of_reads_before_quit_;
- int callbacks_received_;
- bool issue_reads_from_callback_;
-};
-
-// Workaround to be able to use a base::Closure for sending data.
-// Send() returns int but a closure must return void.
-void SendData(base::CancelableSyncSocket* socket,
- const void* buffer,
- size_t length) {
- socket->Send(buffer, length);
-}
-
-} // end namespace.
-
-// Tests doing a pending read from a socket and using an IO handler to get
-// notified of data.
-TEST(AsyncSocketIoHandlerTest, AsynchronousReadWithMessageLoop) {
- MessageLoopForIO loop;
-
- base::CancelableSyncSocket pair[2];
- ASSERT_TRUE(base::CancelableSyncSocket::CreatePair(&pair[0], &pair[1]));
-
- TestSocketReader reader(&pair[0], 1, false);
- EXPECT_TRUE(reader.IssueRead());
-
- pair[1].Send(kAsyncSocketIoTestString, kAsyncSocketIoTestStringLength);
- MessageLoop::current()->Run();
- EXPECT_EQ(strcmp(reader.buffer(), kAsyncSocketIoTestString), 0);
- EXPECT_EQ(1, reader.callbacks_received());
-}
-
-// Tests doing a read from a socket when we know that there is data in the
-// socket. Here we want to make sure that any async 'can read' notifications
-// won't trip us up and that the synchronous case works as well.
-TEST(AsyncSocketIoHandlerTest, SynchronousReadWithMessageLoop) {
- MessageLoopForIO loop;
-
- base::CancelableSyncSocket pair[2];
- ASSERT_TRUE(base::CancelableSyncSocket::CreatePair(&pair[0], &pair[1]));
-
- TestSocketReader reader(&pair[0], -1, false);
-
- pair[1].Send(kAsyncSocketIoTestString, kAsyncSocketIoTestStringLength);
- MessageLoop::current()->PostDelayedTask(FROM_HERE, MessageLoop::QuitClosure(),
- base::TimeDelta::FromMilliseconds(100));
- MessageLoop::current()->Run();
-
- EXPECT_TRUE(reader.IssueRead());
- EXPECT_EQ(strcmp(reader.buffer(), kAsyncSocketIoTestString), 0);
- // We've now verified that the read happened synchronously, but it's not
- // guaranteed that the callback has been issued since the callback will be
- // called asynchronously even though the read may have been done.
- // So we call RunUntilIdle() to allow any event notifications or APC's on
- // Windows, to execute before checking the count of how many callbacks we've
- // received.
- MessageLoop::current()->RunUntilIdle();
- EXPECT_EQ(1, reader.callbacks_received());
-}
-
-// Calls Read() from within a callback to test that simple read "loops" work.
-TEST(AsyncSocketIoHandlerTest, ReadFromCallback) {
- MessageLoopForIO loop;
-
- base::CancelableSyncSocket pair[2];
- ASSERT_TRUE(base::CancelableSyncSocket::CreatePair(&pair[0], &pair[1]));
-
- const int kReadOperationCount = 10;
- TestSocketReader reader(&pair[0], kReadOperationCount, true);
- EXPECT_TRUE(reader.IssueRead());
-
- // Issue sends on an interval to satisfy the Read() requirements.
- int64 milliseconds = 0;
- for (int i = 0; i < kReadOperationCount; ++i) {
- MessageLoop::current()->PostDelayedTask(FROM_HERE,
- base::Bind(&SendData, &pair[1], kAsyncSocketIoTestString,
- kAsyncSocketIoTestStringLength),
- base::TimeDelta::FromMilliseconds(milliseconds));
- milliseconds += 10;
- }
-
- MessageLoop::current()->PostDelayedTask(FROM_HERE, MessageLoop::QuitClosure(),
- base::TimeDelta::FromMilliseconds(100 + milliseconds));
-
- MessageLoop::current()->Run();
- EXPECT_EQ(kReadOperationCount, reader.callbacks_received());
-}
diff --git a/src/media/audio/async_socket_io_handler_win.cc b/src/media/audio/async_socket_io_handler_win.cc
deleted file mode 100644
index f83f405..0000000
--- a/src/media/audio/async_socket_io_handler_win.cc
+++ /dev/null
@@ -1,76 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "media/audio/async_socket_io_handler.h"
-
-namespace media {
-
-AsyncSocketIoHandler::AsyncSocketIoHandler()
- : socket_(base::SyncSocket::kInvalidHandle),
- context_(NULL),
- is_pending_(false) {}
-
-AsyncSocketIoHandler::~AsyncSocketIoHandler() {
- // We need to be deleted on the correct thread to avoid racing with the
- // message loop thread.
- DCHECK(CalledOnValidThread());
-
- if (context_) {
- if (is_pending_) {
- // Make the context be deleted by the message pump when done.
- context_->handler = NULL;
- } else {
- delete context_;
- }
- }
-}
-
-// Implementation of IOHandler on Windows.
-void AsyncSocketIoHandler::OnIOCompleted(MessageLoopForIO::IOContext* context,
- DWORD bytes_transfered,
- DWORD error) {
- DCHECK(CalledOnValidThread());
- DCHECK_EQ(context_, context);
- DCHECK(!read_complete_.is_null());
- is_pending_ = false;
- read_complete_.Run(error == ERROR_SUCCESS ? bytes_transfered : 0);
-}
-
-bool AsyncSocketIoHandler::Read(char* buffer, int buffer_len) {
- DCHECK(CalledOnValidThread());
- DCHECK(!read_complete_.is_null());
- DCHECK(!is_pending_);
- DCHECK_NE(socket_, base::SyncSocket::kInvalidHandle);
-
- DWORD bytes_read = 0;
- BOOL ok = ::ReadFile(socket_, buffer, buffer_len, &bytes_read,
- &context_->overlapped);
- // The completion port will be signaled regardless of completing the read
- // straight away or asynchronously (ERROR_IO_PENDING). OnIOCompleted() will
- // be called regardless and we don't need to explicitly run the callback
- // in the case where ok is FALSE and GLE==ERROR_IO_PENDING.
- is_pending_ = !ok && (GetLastError() == ERROR_IO_PENDING);
- return ok || is_pending_;
-}
-
-bool AsyncSocketIoHandler::Initialize(base::SyncSocket::Handle socket,
- const ReadCompleteCallback& callback) {
- DCHECK(!context_);
- DCHECK_EQ(socket_, base::SyncSocket::kInvalidHandle);
-
- DetachFromThread();
-
- socket_ = socket;
- read_complete_ = callback;
-
- MessageLoopForIO::current()->RegisterIOHandler(socket, this);
-
- context_ = new MessageLoopForIO::IOContext();
- context_->handler = this;
- memset(&context_->overlapped, 0, sizeof(context_->overlapped));
-
- return true;
-}
-
-} // namespace media.
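The Windows Read() above treats an immediate completion and ERROR_IO_PENDING the same way, because the I/O completion port delivers OnIOCompleted() in both cases. Below is a standalone sketch of just that result handling, assuming a HANDLE opened for overlapped I/O; IssueOverlappedRead is an illustrative name, not part of the code above.

#include <windows.h>

// Issues an overlapped read and reports whether the request was accepted.
// Completion (and the byte count) is delivered later through the completion
// port, as in AsyncSocketIoHandler::OnIOCompleted() above.
bool IssueOverlappedRead(HANDLE handle, char* buffer, DWORD buffer_len,
                         OVERLAPPED* overlapped) {
  DWORD bytes_read = 0;
  BOOL ok = ::ReadFile(handle, buffer, buffer_len, &bytes_read, overlapped);
  if (ok)
    return true;                                // Completed synchronously.
  return ::GetLastError() == ERROR_IO_PENDING;  // Queued asynchronously.
}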
diff --git a/src/media/audio/audio_buffers_state.cc b/src/media/audio/audio_buffers_state.cc
deleted file mode 100644
index 6c4f950..0000000
--- a/src/media/audio/audio_buffers_state.cc
+++ /dev/null
@@ -1,20 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "media/audio/audio_buffers_state.h"
-
-namespace media {
-
-AudioBuffersState::AudioBuffersState()
- : pending_bytes(0),
- hardware_delay_bytes(0) {
-}
-
-AudioBuffersState::AudioBuffersState(int pending_bytes,
- int hardware_delay_bytes)
- : pending_bytes(pending_bytes),
- hardware_delay_bytes(hardware_delay_bytes) {
-}
-
-} // namespace media
diff --git a/src/media/audio/audio_buffers_state.h b/src/media/audio/audio_buffers_state.h
deleted file mode 100644
index 79244ae..0000000
--- a/src/media/audio/audio_buffers_state.h
+++ /dev/null
@@ -1,32 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef MEDIA_AUDIO_AUDIO_BUFFERS_STATE_H_
-#define MEDIA_AUDIO_AUDIO_BUFFERS_STATE_H_
-
-#include "media/base/media_export.h"
-
-namespace media {
-
-// AudioBuffersState struct stores current state of audio buffers.
-// It is used for audio synchronization.
-struct MEDIA_EXPORT AudioBuffersState {
- AudioBuffersState();
- AudioBuffersState(int pending_bytes, int hardware_delay_bytes);
-
- int total_bytes() {
- return pending_bytes + hardware_delay_bytes;
- }
-
- // Number of bytes we currently have in our software buffer.
- int pending_bytes;
-
- // Number of bytes that have been written to the device, but haven't
- // been played yet.
- int hardware_delay_bytes;
-};
-
-} // namespace media
-
-#endif // MEDIA_AUDIO_AUDIO_BUFFERS_STATE_H_
diff --git a/src/media/audio/audio_device_name.cc b/src/media/audio/audio_device_name.cc
deleted file mode 100644
index 02bb03f..0000000
--- a/src/media/audio/audio_device_name.cc
+++ /dev/null
@@ -1,18 +0,0 @@
-// Copyright (c) 2011 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "media/audio/audio_device_name.h"
-
-namespace media {
-
-AudioDeviceName::AudioDeviceName() {}
-
-AudioDeviceName::AudioDeviceName(const std::string& device_name,
- const std::string& unique_id)
- : device_name(device_name),
- unique_id(unique_id) {
-}
-
-} // namespace media
-
diff --git a/src/media/audio/audio_device_name.h b/src/media/audio/audio_device_name.h
deleted file mode 100644
index aa3cca0..0000000
--- a/src/media/audio/audio_device_name.h
+++ /dev/null
@@ -1,27 +0,0 @@
-// Copyright (c) 2011 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef MEDIA_AUDIO_AUDIO_DEVICE_NAME_H_
-#define MEDIA_AUDIO_AUDIO_DEVICE_NAME_H_
-
-#include <list>
-#include <string>
-#include "media/base/media_export.h"
-
-namespace media {
-
-struct MEDIA_EXPORT AudioDeviceName {
- AudioDeviceName();
- AudioDeviceName(const std::string& device_name,
- const std::string& unique_id);
-
- std::string device_name; // Friendly name of the device.
- std::string unique_id; // Unique identifier for the device.
-};
-
-typedef std::list<AudioDeviceName> AudioDeviceNames;
-
-} // namespace media
-
-#endif // MEDIA_AUDIO_AUDIO_DEVICE_NAME_H_
diff --git a/src/media/audio/audio_device_thread.cc b/src/media/audio/audio_device_thread.cc
deleted file mode 100644
index 51d5ecd..0000000
--- a/src/media/audio/audio_device_thread.cc
+++ /dev/null
@@ -1,201 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "media/audio/audio_device_thread.h"
-
-#include <algorithm>
-
-#include "base/bind.h"
-#include "base/logging.h"
-#include "base/memory/aligned_memory.h"
-#include "base/message_loop.h"
-#include "base/threading/platform_thread.h"
-#include "base/threading/thread_restrictions.h"
-#include "media/audio/audio_util.h"
-#include "media/base/audio_bus.h"
-
-using base::PlatformThread;
-
-namespace media {
-
-// The actual worker thread implementation. It's very bare bones and much
-// simpler than SimpleThread (no synchronization in Start, etc) and supports
-// joining the thread handle asynchronously via a provided message loop even
-// after the Thread object itself has been deleted.
-class AudioDeviceThread::Thread
- : public PlatformThread::Delegate,
- public base::RefCountedThreadSafe<AudioDeviceThread::Thread> {
- public:
- Thread(AudioDeviceThread::Callback* callback,
- base::SyncSocket::Handle socket,
- const char* thread_name);
-
- void Start();
-
- // Stops the thread. If |loop_for_join| is non-NULL, the function posts
- // a task to join (close) the thread handle later instead of waiting for
- // the thread. If loop_for_join is NULL, then the function waits
- // synchronously for the thread to terminate.
- void Stop(MessageLoop* loop_for_join);
-
- private:
- friend class base::RefCountedThreadSafe<AudioDeviceThread::Thread>;
- virtual ~Thread();
-
- // Overrides from PlatformThread::Delegate.
- virtual void ThreadMain() OVERRIDE;
-
- // Runs the loop that reads from the socket.
- void Run();
-
- private:
- base::PlatformThreadHandle thread_;
- AudioDeviceThread::Callback* callback_;
- base::CancelableSyncSocket socket_;
- base::Lock callback_lock_;
- const char* thread_name_;
-
- DISALLOW_COPY_AND_ASSIGN(Thread);
-};
-
-// AudioDeviceThread implementation
-
-AudioDeviceThread::AudioDeviceThread() {
-}
-
-AudioDeviceThread::~AudioDeviceThread() {
- DCHECK(!thread_);
-}
-
-void AudioDeviceThread::Start(AudioDeviceThread::Callback* callback,
- base::SyncSocket::Handle socket,
- const char* thread_name) {
- base::AutoLock auto_lock(thread_lock_);
- CHECK(thread_ == NULL);
- thread_ = new AudioDeviceThread::Thread(callback, socket, thread_name);
- thread_->Start();
-}
-
-void AudioDeviceThread::Stop(MessageLoop* loop_for_join) {
- base::AutoLock auto_lock(thread_lock_);
- if (thread_) {
- thread_->Stop(loop_for_join);
- thread_ = NULL;
- }
-}
-
-bool AudioDeviceThread::IsStopped() {
- base::AutoLock auto_lock(thread_lock_);
- return thread_ == NULL;
-}
-
-// AudioDeviceThread::Thread implementation
-AudioDeviceThread::Thread::Thread(AudioDeviceThread::Callback* callback,
- base::SyncSocket::Handle socket,
- const char* thread_name)
- : thread_(base::kNullThreadHandle),
- callback_(callback),
- socket_(socket),
- thread_name_(thread_name) {
-}
-
-AudioDeviceThread::Thread::~Thread() {
- DCHECK_EQ(thread_, base::kNullThreadHandle) << "Stop wasn't called";
-}
-
-void AudioDeviceThread::Thread::Start() {
- base::AutoLock auto_lock(callback_lock_);
- DCHECK_EQ(thread_, base::kNullThreadHandle);
- // This reference will be released when the thread exits.
- AddRef();
-
- PlatformThread::CreateWithPriority(0, this, &thread_,
- base::kThreadPriority_RealtimeAudio);
- CHECK(thread_ != base::kNullThreadHandle);
-}
-
-void AudioDeviceThread::Thread::Stop(MessageLoop* loop_for_join) {
- socket_.Shutdown();
-
- base::PlatformThreadHandle thread = base::kNullThreadHandle;
-
- { // NOLINT
- base::AutoLock auto_lock(callback_lock_);
- callback_ = NULL;
- std::swap(thread, thread_);
- }
-
- if (thread != base::kNullThreadHandle) {
- if (loop_for_join) {
- loop_for_join->PostTask(FROM_HERE,
- base::Bind(&base::PlatformThread::Join, thread));
- } else {
- base::PlatformThread::Join(thread);
- }
- }
-}
-
-void AudioDeviceThread::Thread::ThreadMain() {
- PlatformThread::SetName(thread_name_);
-
- // Singleton access is safe from this thread as long as callback is non-NULL.
- // The callback is the only point where the thread calls out to 'unknown' code
- // that might touch singletons, and the lifetime of the callback is controlled
- // by another thread on which singleton access is OK as well.
- base::ThreadRestrictions::SetSingletonAllowed(true);
-
- { // NOLINT
- base::AutoLock auto_lock(callback_lock_);
- if (callback_)
- callback_->InitializeOnAudioThread();
- }
-
- Run();
-
- // Release the reference for the thread. Note that after this, the Thread
- // instance will most likely be deleted.
- Release();
-}
-
-void AudioDeviceThread::Thread::Run() {
- while (true) {
- int pending_data = 0;
- size_t bytes_read = socket_.Receive(&pending_data, sizeof(pending_data));
- if (bytes_read != sizeof(pending_data)) {
- DCHECK_EQ(bytes_read, 0U);
- break;
- }
-
- base::AutoLock auto_lock(callback_lock_);
- if (callback_)
- callback_->Process(pending_data);
- }
-}
-
-// AudioDeviceThread::Callback implementation
-
-AudioDeviceThread::Callback::Callback(
- const AudioParameters& audio_parameters,
- int input_channels,
- base::SharedMemoryHandle memory, int memory_length)
- : audio_parameters_(audio_parameters),
- input_channels_(input_channels),
- samples_per_ms_(audio_parameters.sample_rate() / 1000),
- bytes_per_ms_(audio_parameters.channels() *
- (audio_parameters_.bits_per_sample() / 8) *
- samples_per_ms_),
- shared_memory_(memory, false),
- memory_length_(memory_length) {
- CHECK_NE(bytes_per_ms_, 0); // Catch division by zero early.
- CHECK_NE(samples_per_ms_, 0);
-}
-
-AudioDeviceThread::Callback::~Callback() {}
-
-void AudioDeviceThread::Callback::InitializeOnAudioThread() {
- MapSharedMemory();
- DCHECK(shared_memory_.memory() != NULL);
-}
-
-} // namespace media.
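Stop() above relies on a specific ordering: clear the callback and take ownership of the thread handle while holding the lock, then join outside the lock so the worker can still acquire the lock and observe that the callback is gone. A simplified standalone sketch of the same shutdown pattern follows, using std::thread and std::mutex instead of base::PlatformThread; the Worker class and its members are illustrative, and the sleep only stands in for the blocking socket_.Receive() that Shutdown() unblocks in the real code.

#include <chrono>
#include <mutex>
#include <thread>
#include <utility>

class Worker {
 public:
  void Start() {
    std::lock_guard<std::mutex> lock(mutex_);
    running_ = true;
    thread_ = std::thread([this] { Run(); });
  }

  void Stop() {
    std::thread to_join;
    {
      std::lock_guard<std::mutex> lock(mutex_);
      running_ = false;              // Analogue of clearing |callback_|.
      to_join = std::move(thread_);  // Analogue of std::swap(thread, thread_).
    }
    if (to_join.joinable())
      to_join.join();                // Join outside the lock.
  }

 private:
  void Run() {
    for (;;) {
      {
        std::lock_guard<std::mutex> lock(mutex_);
        if (!running_)
          break;
        // The real code calls callback_->Process() here under the lock.
      }
      // Stand-in for the blocking socket_.Receive() in Run() above.
      std::this_thread::sleep_for(std::chrono::milliseconds(10));
    }
  }

  std::mutex mutex_;
  std::thread thread_;
  bool running_ = false;
};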
diff --git a/src/media/audio/audio_device_thread.h b/src/media/audio/audio_device_thread.h
deleted file mode 100644
index 44dbc3a..0000000
--- a/src/media/audio/audio_device_thread.h
+++ /dev/null
@@ -1,111 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef MEDIA_AUDIO_AUDIO_DEVICE_THREAD_H_
-#define MEDIA_AUDIO_AUDIO_DEVICE_THREAD_H_
-
-#include "base/basictypes.h"
-#include "base/memory/ref_counted.h"
-#include "base/memory/scoped_ptr.h"
-#include "base/shared_memory.h"
-#include "base/sync_socket.h"
-#include "base/synchronization/lock.h"
-#include "media/base/media_export.h"
-#include "media/audio/audio_parameters.h"
-#include "media/audio/shared_memory_util.h"
-
-class MessageLoop;
-
-namespace media {
-class AudioBus;
-
-// Data transfer between browser and render process uses a combination
-// of sync sockets and shared memory. To read from the socket and render
-// data, we use a worker thread, a.k.a. the AudioDeviceThread, which reads
-// data from the browser via the socket and fills the shared memory from the
-// audio thread via the AudioDeviceThread::Callback interface/class.
-// For more details see the documentation in audio_device.h.
-//
-// TODO(tommi): Multiple audio input/output device instances should be able to
-// share the same thread instead of spinning one per instance.
-class MEDIA_EXPORT AudioDeviceThread {
- public:
- // This is the callback interface/base class that Audio[Output|Input]Device
- // implements to render input/output data. The callbacks run on the
- // thread owned by AudioDeviceThread.
- class Callback {
- public:
- Callback(const AudioParameters& audio_parameters,
- int input_channels,
- base::SharedMemoryHandle memory,
- int memory_length);
- virtual ~Callback();
-
- // One time initialization for the callback object on the audio thread.
- void InitializeOnAudioThread();
-
- // Derived implementations must call shared_memory_.Map appropriately
- // before Process can be called.
- virtual void MapSharedMemory() = 0;
-
- // Called whenever we receive notifications about pending data.
- virtual void Process(int pending_data) = 0;
-
- protected:
- // Protected so that derived classes can access directly.
- // The variables are 'const' since values are calculated/set in the
- // constructor and must never change.
- const AudioParameters audio_parameters_;
- const int input_channels_;
- const int samples_per_ms_;
- const int bytes_per_ms_;
-
- base::SharedMemory shared_memory_;
- const int memory_length_;
-
- private:
- DISALLOW_COPY_AND_ASSIGN(Callback);
- };
-
- AudioDeviceThread();
- ~AudioDeviceThread();
-
- // Starts the audio thread. The thread must not already be running.
- void Start(AudioDeviceThread::Callback* callback,
- base::SyncSocket::Handle socket,
- const char* thread_name);
-
- // This tells the audio thread to stop and clean up the data.
- // The method can stop the thread synchronously or asynchronously.
- // In the latter case, the thread will still be running after Stop()
- // returns, but the callback pointer is cleared so no further callbacks will
- // be made (i.e., after Stop() returns, it is safe to delete the callback).
- // The |loop_for_join| parameter is required for asynchronous operation
- // in order to join the worker thread and close the thread handle later via a
- // posted task.
- // If set to NULL, the function waits for the thread to exit before returning.
- void Stop(MessageLoop* loop_for_join);
-
- // Returns true if the thread is stopped or stopping.
- bool IsStopped();
-
- private:
- // Our own private SimpleThread override. We implement this in a
- // private class so that we get the following benefits:
- // 1) AudioDeviceThread doesn't expose SimpleThread methods.
- // I.e. the caller can't call Start()/Stop() - which would be bad.
- // 2) We override ThreadMain to add additional on-thread initialization
- // while still synchronized with SimpleThread::Start() to provide
- // reliable initialization.
- class Thread;
-
- base::Lock thread_lock_;
- scoped_refptr<AudioDeviceThread::Thread> thread_;
-
- DISALLOW_COPY_AND_ASSIGN(AudioDeviceThread);
-};
-
-} // namespace media.
-
-#endif // MEDIA_AUDIO_AUDIO_DEVICE_THREAD_H_
diff --git a/src/media/audio/audio_input_controller.cc b/src/media/audio/audio_input_controller.cc
deleted file mode 100644
index 99bb420..0000000
--- a/src/media/audio/audio_input_controller.cc
+++ /dev/null
@@ -1,319 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "media/audio/audio_input_controller.h"
-
-#include "base/bind.h"
-#include "base/threading/thread_restrictions.h"
-#include "media/base/limits.h"
-
-namespace {
-const int kMaxInputChannels = 2;
-const int kTimerResetIntervalSeconds = 1;
-#if defined(OS_IOS)
-// The first callback on iOS is received after the current background
-// audio has faded away.
-const int kTimerInitialIntervalSeconds = 4;
-#else
-const int kTimerInitialIntervalSeconds = 1;
-#endif // defined(OS_IOS)
-}
-
-namespace media {
-
-// static
-AudioInputController::Factory* AudioInputController::factory_ = NULL;
-
-AudioInputController::AudioInputController(EventHandler* handler,
- SyncWriter* sync_writer)
- : creator_loop_(base::MessageLoopProxy::current()),
- handler_(handler),
- stream_(NULL),
- data_is_active_(false),
- state_(kEmpty),
- sync_writer_(sync_writer),
- max_volume_(0.0) {
- DCHECK(creator_loop_);
-}
-
-AudioInputController::~AudioInputController() {
- DCHECK(kClosed == state_ || kCreated == state_ || kEmpty == state_);
-}
-
-// static
-scoped_refptr<AudioInputController> AudioInputController::Create(
- AudioManager* audio_manager,
- EventHandler* event_handler,
- const AudioParameters& params) {
- DCHECK(audio_manager);
-
- if (!params.IsValid() || (params.channels() > kMaxInputChannels))
- return NULL;
-
- if (factory_)
- return factory_->Create(audio_manager, event_handler, params);
-
- scoped_refptr<AudioInputController> controller(new AudioInputController(
- event_handler, NULL));
-
- controller->message_loop_ = audio_manager->GetMessageLoop();
-
- // Create and open a new audio input stream from the existing
- // audio-device thread. Use the default audio-input device.
- std::string device_id = AudioManagerBase::kDefaultDeviceId;
- if (!controller->message_loop_->PostTask(FROM_HERE,
- base::Bind(&AudioInputController::DoCreate, controller,
- base::Unretained(audio_manager), params, device_id))) {
- controller = NULL;
- }
-
- return controller;
-}
-
-// static
-scoped_refptr<AudioInputController> AudioInputController::CreateLowLatency(
- AudioManager* audio_manager,
- EventHandler* event_handler,
- const AudioParameters& params,
- const std::string& device_id,
- SyncWriter* sync_writer) {
- DCHECK(audio_manager);
- DCHECK(sync_writer);
-
- if (!params.IsValid() || (params.channels() > kMaxInputChannels))
- return NULL;
-
- // Create the AudioInputController object and ensure that it runs on
- // the audio-manager thread.
- scoped_refptr<AudioInputController> controller(new AudioInputController(
- event_handler, sync_writer));
- controller->message_loop_ = audio_manager->GetMessageLoop();
-
- // Create and open a new audio input stream from the existing
- // audio-device thread. Use the provided audio-input device.
- if (!controller->message_loop_->PostTask(FROM_HERE,
- base::Bind(&AudioInputController::DoCreate, controller,
- base::Unretained(audio_manager), params, device_id))) {
- controller = NULL;
- }
-
- return controller;
-}
-
-void AudioInputController::Record() {
- message_loop_->PostTask(FROM_HERE, base::Bind(
- &AudioInputController::DoRecord, this));
-}
-
-void AudioInputController::Close(const base::Closure& closed_task) {
- DCHECK(!closed_task.is_null());
- DCHECK(creator_loop_->BelongsToCurrentThread());
-
- message_loop_->PostTaskAndReply(
- FROM_HERE, base::Bind(&AudioInputController::DoClose, this), closed_task);
-}
-
-void AudioInputController::SetVolume(double volume) {
- message_loop_->PostTask(FROM_HERE, base::Bind(
- &AudioInputController::DoSetVolume, this, volume));
-}
-
-void AudioInputController::SetAutomaticGainControl(bool enabled) {
- message_loop_->PostTask(FROM_HERE, base::Bind(
- &AudioInputController::DoSetAutomaticGainControl, this, enabled));
-}
-
-void AudioInputController::DoCreate(AudioManager* audio_manager,
- const AudioParameters& params,
- const std::string& device_id) {
- DCHECK(message_loop_->BelongsToCurrentThread());
-
- stream_ = audio_manager->MakeAudioInputStream(params, device_id);
-
- if (!stream_) {
- // TODO(satish): Define error types.
- handler_->OnError(this, 0);
- return;
- }
-
- if (stream_ && !stream_->Open()) {
- stream_->Close();
- stream_ = NULL;
- // TODO(satish): Define error types.
- handler_->OnError(this, 0);
- return;
- }
-
- DCHECK(!no_data_timer_.get());
- // Create the data timer which will call DoCheckForNoData(). The timer
- // is started in DoRecord() and restarted in each DoCheckForNoData() callback.
- no_data_timer_.reset(new base::Timer(
- FROM_HERE, base::TimeDelta::FromSeconds(kTimerInitialIntervalSeconds),
- base::Bind(&AudioInputController::DoCheckForNoData,
- base::Unretained(this)), false));
- state_ = kCreated;
- handler_->OnCreated(this);
-}
-
-void AudioInputController::DoRecord() {
- DCHECK(message_loop_->BelongsToCurrentThread());
-
- if (state_ != kCreated)
- return;
-
- {
- base::AutoLock auto_lock(lock_);
- state_ = kRecording;
- }
-
- // Start the data timer. Once |kTimerResetIntervalSeconds| have passed,
- // a callback to DoCheckForNoData() is made.
- no_data_timer_->Reset();
-
- stream_->Start(this);
- handler_->OnRecording(this);
-}
-
-void AudioInputController::DoClose() {
- DCHECK(message_loop_->BelongsToCurrentThread());
-
- // Delete the timer on the same thread that created it.
- no_data_timer_.reset();
-
- if (state_ != kClosed) {
- DoStopCloseAndClearStream(NULL);
- SetDataIsActive(false);
-
- if (LowLatencyMode()) {
- sync_writer_->Close();
- }
-
- state_ = kClosed;
- }
-}
-
-void AudioInputController::DoReportError(int code) {
- DCHECK(message_loop_->BelongsToCurrentThread());
- handler_->OnError(this, code);
-}
-
-void AudioInputController::DoSetVolume(double volume) {
- DCHECK(message_loop_->BelongsToCurrentThread());
- DCHECK_GE(volume, 0);
- DCHECK_LE(volume, 1.0);
-
- if (state_ != kCreated && state_ != kRecording)
- return;
-
- // Only ask for the maximum volume at first call and use cached value
- // for remaining function calls.
- if (!max_volume_) {
- max_volume_ = stream_->GetMaxVolume();
- }
-
- if (max_volume_ == 0.0) {
- DLOG(WARNING) << "Failed to access input volume control";
- return;
- }
-
- // Set the stream volume and scale to a range matched to the platform.
- stream_->SetVolume(max_volume_ * volume);
-}
-
-void AudioInputController::DoSetAutomaticGainControl(bool enabled) {
- DCHECK(message_loop_->BelongsToCurrentThread());
- DCHECK_NE(state_, kRecording);
-
- // Ensure that the AGC state can only be modified before streaming starts.
- if (state_ != kCreated || state_ == kRecording)
- return;
-
- stream_->SetAutomaticGainControl(enabled);
-}
-
-void AudioInputController::DoCheckForNoData() {
- DCHECK(message_loop_->BelongsToCurrentThread());
-
- if (!GetDataIsActive()) {
- // The data-is-active marker will be false only if it has been more than
- // one second since a data packet was recorded. This can happen if a
- // capture device has been removed or disabled.
- handler_->OnError(this, 0);
- return;
- }
-
- // Mark data as non-active. The flag will be re-enabled in OnData() each
- // time a data packet is received. Hence, under normal conditions, the
- // flag will only be disabled during a very short period.
- SetDataIsActive(false);
-
- // Restart the timer to ensure that we check the flag again in
- // |kTimerResetIntervalSeconds|.
- no_data_timer_->Start(
- FROM_HERE, base::TimeDelta::FromSeconds(kTimerResetIntervalSeconds),
- base::Bind(&AudioInputController::DoCheckForNoData,
- base::Unretained(this)));
-}
-
-void AudioInputController::OnData(AudioInputStream* stream, const uint8* data,
- uint32 size, uint32 hardware_delay_bytes,
- double volume) {
- {
- base::AutoLock auto_lock(lock_);
- if (state_ != kRecording)
- return;
- }
-
- // Mark data as active to ensure that the periodic calls to
- // DoCheckForNoData() do not report an error to the event handler.
- SetDataIsActive(true);
-
- // Use SyncSocket if we are in a low-latency mode.
- if (LowLatencyMode()) {
- sync_writer_->Write(data, size, volume);
- sync_writer_->UpdateRecordedBytes(hardware_delay_bytes);
- return;
- }
-
- handler_->OnData(this, data, size);
-}
-
-void AudioInputController::OnClose(AudioInputStream* stream) {
- DVLOG(1) << "AudioInputController::OnClose()";
- // TODO(satish): Sometimes the device driver closes the input stream without
- // us asking for it (maybe if the device was unplugged?). Check how to handle
- // such cases here.
-}
-
-void AudioInputController::OnError(AudioInputStream* stream, int code) {
- // Handle error on the audio-manager thread.
- message_loop_->PostTask(FROM_HERE, base::Bind(
- &AudioInputController::DoReportError, this, code));
-}
-
-void AudioInputController::DoStopCloseAndClearStream(
- base::WaitableEvent *done) {
- DCHECK(message_loop_->BelongsToCurrentThread());
-
- // Allow calling unconditionally and bail if we don't have a stream to close.
- if (stream_ != NULL) {
- stream_->Stop();
- stream_->Close();
- stream_ = NULL;
- }
-
- // Should be last in the method, do not touch "this" from here on.
- if (done != NULL)
- done->Signal();
-}
-
-void AudioInputController::SetDataIsActive(bool enabled) {
- base::subtle::Release_Store(&data_is_active_, enabled);
-}
-
-bool AudioInputController::GetDataIsActive() {
- return (base::subtle::Acquire_Load(&data_is_active_) != false);
-}
-
-} // namespace media
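SetDataIsActive()/GetDataIsActive() above implement a small watchdog: OnData() sets a flag from the device thread, and DoCheckForNoData() clears it once per interval and reports an error if it was never set in between. The following standalone sketch shows that flag with std::atomic in place of base::subtle atomics; NoDataWatchdog and its method names are illustrative, not part of the code above.

#include <atomic>

class NoDataWatchdog {
 public:
  // Called from the audio-device callback whenever a data packet arrives
  // (the analogue of SetDataIsActive(true) in OnData() above).
  void OnDataReceived() {
    data_is_active_.store(true, std::memory_order_release);
  }

  // Called periodically on the controller's timer. Returns false when no
  // data arrived since the previous check, which is the condition that makes
  // DoCheckForNoData() report an error to the event handler.
  bool CheckAndReset() {
    return data_is_active_.exchange(false, std::memory_order_acq_rel);
  }

 private:
  std::atomic<bool> data_is_active_{false};
};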
diff --git a/src/media/audio/audio_input_controller.h b/src/media/audio/audio_input_controller.h
deleted file mode 100644
index 77e3e87..0000000
--- a/src/media/audio/audio_input_controller.h
+++ /dev/null
@@ -1,260 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef MEDIA_AUDIO_AUDIO_INPUT_CONTROLLER_H_
-#define MEDIA_AUDIO_AUDIO_INPUT_CONTROLLER_H_
-
-#include <string>
-#include "base/atomicops.h"
-#include "base/callback.h"
-#include "base/memory/ref_counted.h"
-#include "base/memory/scoped_ptr.h"
-#include "base/synchronization/lock.h"
-#include "base/synchronization/waitable_event.h"
-#include "base/threading/thread.h"
-#include "base/timer.h"
-#include "media/audio/audio_io.h"
-#include "media/audio/audio_manager_base.h"
-
-// An AudioInputController controls an AudioInputStream and records data
-// from this input stream. The two main methods are Record() and Close();
-// both are executed on the audio thread, which is injected by the two
-// alternative factory methods, Create() or CreateLowLatency().
-//
-// All public methods of AudioInputController are non-blocking.
-//
-// Here is a state diagram for the AudioInputController:
-//
-// .--> [ Closed / Error ] <--.
-// | |
-// | |
-// [ Created ] ----------> [ Recording ]
-// ^
-// |
-// *[ Empty ]
-//
-// * Initial state
-//
-// State sequences (assuming low-latency):
-//
-// [Creating Thread] [Audio Thread]
-//
-// User AudioInputController EventHandler
-// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-// CreateLowLatency() ==> DoCreate()
-// AudioManager::MakeAudioInputStream()
-// AudioInputStream::Open()
-// .- - - - - - - - - - - - -> OnError()
-// create the data timer
-// .-------------------------> OnCreated()
-// kCreated
-// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-// Record() ==> DoRecord()
-// AudioInputStream::Start()
-// .-------------------------> OnRecording()
-// start the data timer
-// kRecording
-// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-// Close() ==> DoClose()
-// delete the data timer
-// state_ = kClosed
-// AudioInputStream::Stop()
-// AudioInputStream::Close()
-// SyncWriter::Close()
-// Closure::Run() <-----------------.
-// (closure-task)
-//
-// The audio thread itself is owned by the AudioManager that the
-// AudioInputController holds a reference to. When performing tasks on the
-// audio thread, the controller must not add or release references to the
-// AudioManager or itself (since it in turn holds a reference to the manager).
-//
-namespace media {
-
-class MEDIA_EXPORT AudioInputController
- : public base::RefCountedThreadSafe<AudioInputController>,
- public AudioInputStream::AudioInputCallback {
- public:
- // An event handler that receives events from the AudioInputController. The
- // following methods are all called on the audio thread.
- class MEDIA_EXPORT EventHandler {
- public:
- virtual void OnCreated(AudioInputController* controller) = 0;
- virtual void OnRecording(AudioInputController* controller) = 0;
- virtual void OnError(AudioInputController* controller, int error_code) = 0;
- virtual void OnData(AudioInputController* controller, const uint8* data,
- uint32 size) = 0;
-
- protected:
- virtual ~EventHandler() {}
- };
-
- // A synchronous writer interface used by AudioInputController for
- // synchronous writing.
- class SyncWriter {
- public:
- virtual ~SyncWriter() {}
-
- // Notify the synchronous writer about the number of bytes that the
- // soundcard has recorded.
- virtual void UpdateRecordedBytes(uint32 bytes) = 0;
-
- // Write a certain amount of data from |data|. This method returns the
- // number of bytes written.
- virtual uint32 Write(const void* data, uint32 size, double volume) = 0;
-
- // Close this synchronous writer.
- virtual void Close() = 0;
- };
-
- // AudioInputController::Create() can use the currently registered Factory
- // to create the AudioInputController. Factory is intended for testing only.
- class Factory {
- public:
- virtual AudioInputController* Create(AudioManager* audio_manager,
- EventHandler* event_handler,
- AudioParameters params) = 0;
- protected:
- virtual ~Factory() {}
- };
-
- // Factory method for creating an AudioInputController.
- // The audio device will be created on the audio thread, and when that is
- // done, the event handler will receive an OnCreated() call from that same
- // thread.
- static scoped_refptr<AudioInputController> Create(
- AudioManager* audio_manager,
- EventHandler* event_handler,
- const AudioParameters& params);
-
- // Sets the factory used by the static method Create(). AudioInputController
- // does not take ownership of |factory|. A value of NULL results in an
- // AudioInputController being created directly.
- static void set_factory_for_testing(Factory* factory) { factory_ = factory; }
- AudioInputStream* stream_for_testing() { return stream_; }
-
- // Factory method for creating an AudioInputController for low-latency mode.
- // The audio device will be created on the audio thread, and when that is
- // done, the event handler will receive an OnCreated() call from that same
- // thread.
- static scoped_refptr<AudioInputController> CreateLowLatency(
- AudioManager* audio_manager,
- EventHandler* event_handler,
- const AudioParameters& params,
- const std::string& device_id,
- // External synchronous writer for audio controller.
- SyncWriter* sync_writer);
-
- // Starts recording using the created audio input stream.
- // This method is called on the creator thread.
- virtual void Record();
-
- // Closes the audio input stream. The state is changed and the resources
- // are freed on the audio thread. |closed_task| is then executed on the thread
- // that called Close().
- // Callbacks (EventHandler and SyncWriter) must exist until |closed_task|
- // is called.
- // It is safe to call this method more than once. Calls after the first one
- // will have no effect.
- // This method trampolines to the audio thread.
- virtual void Close(const base::Closure& closed_task);
-
- // Sets the capture volume of the input stream. The value 0.0 corresponds
- // to muted and 1.0 to maximum volume.
- virtual void SetVolume(double volume);
-
- // Sets the Automatic Gain Control (AGC) state of the input stream.
- // Changing the AGC state is not supported while recording is active.
- virtual void SetAutomaticGainControl(bool enabled);
-
- // AudioInputCallback implementation. Threading details depends on the
- // device-specific implementation.
- virtual void OnData(AudioInputStream* stream, const uint8* src, uint32 size,
- uint32 hardware_delay_bytes, double volume) OVERRIDE;
- virtual void OnClose(AudioInputStream* stream) OVERRIDE;
- virtual void OnError(AudioInputStream* stream, int code) OVERRIDE;
-
- bool LowLatencyMode() const { return sync_writer_ != NULL; }
-
- protected:
- friend class base::RefCountedThreadSafe<AudioInputController>;
-
- // Internal state of the source.
- enum State {
- kEmpty,
- kCreated,
- kRecording,
- kClosed,
- kError
- };
-
- AudioInputController(EventHandler* handler, SyncWriter* sync_writer);
- virtual ~AudioInputController();
-
- // Methods called on the audio thread (owned by the AudioManager).
- void DoCreate(AudioManager* audio_manager, const AudioParameters& params,
- const std::string& device_id);
- void DoRecord();
- void DoClose();
- void DoReportError(int code);
- void DoSetVolume(double volume);
- void DoSetAutomaticGainControl(bool enabled);
-
- // Method which ensures that OnError() is triggered when data recording
- // times out. Called on the audio thread.
- void DoCheckForNoData();
-
- // Helper method that stops, closes, and sets |stream_| to NULL.
- // Signals event when done if the event is not NULL.
- void DoStopCloseAndClearStream(base::WaitableEvent* done);
-
- void SetDataIsActive(bool enabled);
- bool GetDataIsActive();
-
- // Gives access to the message loop of the creating thread.
- scoped_refptr<base::MessageLoopProxy> creator_loop_;
-
- // The message loop of audio-manager thread that this object runs on.
- scoped_refptr<base::MessageLoopProxy> message_loop_;
-
- // Contains the AudioInputController::EventHandler which receives state
- // notifications from this class.
- EventHandler* handler_;
-
- // Pointer to the audio input stream object.
- AudioInputStream* stream_;
-
- // |no_data_timer_| is used to call OnError() when we stop receiving
- // OnData() calls without an OnClose() call. This can occur
- // when an audio input device is unplugged whilst recording on Windows.
- // See http://crbug.com/79936 for details.
- // This member is only touched by the audio thread.
- scoped_ptr<base::Timer> no_data_timer_;
-
- // This flag is used to signal that we are receiving OnData() calls, i.e.,
- // that data is active. It can be touched by the audio thread and by the
- // low-level audio thread which calls OnData(). E.g. on Windows, the
- // low-level audio thread is called wasapi_capture_thread.
- base::subtle::Atomic32 data_is_active_;
-
- // |state_| is written on the audio thread and read on the hardware audio
- // thread. These operations need to be locked, but no lock is required for
- // reading on the audio input controller thread.
- State state_;
-
- base::Lock lock_;
-
- // SyncWriter is used only in low-latency mode for synchronous writing.
- SyncWriter* sync_writer_;
-
- static Factory* factory_;
-
- double max_volume_;
-
- DISALLOW_COPY_AND_ASSIGN(AudioInputController);
-};
-
-} // namespace media
-
-#endif // MEDIA_AUDIO_AUDIO_INPUT_CONTROLLER_H_
diff --git a/src/media/audio/audio_input_controller_unittest.cc b/src/media/audio/audio_input_controller_unittest.cc
deleted file mode 100644
index 0a2a39b..0000000
--- a/src/media/audio/audio_input_controller_unittest.cc
+++ /dev/null
@@ -1,229 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/basictypes.h"
-#include "base/bind.h"
-#include "base/message_loop.h"
-#include "base/synchronization/waitable_event.h"
-#include "base/test/test_timeouts.h"
-#include "media/audio/audio_input_controller.h"
-#include "testing/gmock/include/gmock/gmock.h"
-#include "testing/gtest/include/gtest/gtest.h"
-
-using ::testing::_;
-using ::testing::AtLeast;
-using ::testing::Exactly;
-using ::testing::InvokeWithoutArgs;
-using ::testing::NotNull;
-
-namespace media {
-
-static const int kSampleRate = AudioParameters::kAudioCDSampleRate;
-static const int kBitsPerSample = 16;
-static const ChannelLayout kChannelLayout = CHANNEL_LAYOUT_STEREO;
-static const int kSamplesPerPacket = kSampleRate / 10;
-
-// Posts MessageLoop::QuitClosure() on specified message loop.
-ACTION_P(QuitMessageLoop, loop_or_proxy) {
- loop_or_proxy->PostTask(FROM_HERE, MessageLoop::QuitClosure());
-}
-
-// Posts MessageLoop::QuitClosure() on the specified message loop after a
-// certain number of calls given by |limit|.
-ACTION_P3(CheckCountAndPostQuitTask, count, limit, loop_or_proxy) {
- if (++*count >= limit) {
- loop_or_proxy->PostTask(FROM_HERE, MessageLoop::QuitClosure());
- }
-}
-
-// Closes AudioInputController synchronously.
-static void CloseAudioController(AudioInputController* controller) {
- controller->Close(MessageLoop::QuitClosure());
- MessageLoop::current()->Run();
-}
-
-class MockAudioInputControllerEventHandler
- : public AudioInputController::EventHandler {
- public:
- MockAudioInputControllerEventHandler() {}
-
- MOCK_METHOD1(OnCreated, void(AudioInputController* controller));
- MOCK_METHOD1(OnRecording, void(AudioInputController* controller));
- MOCK_METHOD2(OnError, void(AudioInputController* controller, int error_code));
- MOCK_METHOD3(OnData, void(AudioInputController* controller,
- const uint8* data, uint32 size));
-
- private:
- DISALLOW_COPY_AND_ASSIGN(MockAudioInputControllerEventHandler);
-};
-
-// Test fixture.
-class AudioInputControllerTest : public testing::Test {
- public:
- AudioInputControllerTest() {}
- virtual ~AudioInputControllerTest() {}
-
- protected:
- MessageLoop message_loop_;
-
- private:
- DISALLOW_COPY_AND_ASSIGN(AudioInputControllerTest);
-};
-
-// Test AudioInputController for create and close without recording audio.
-TEST_F(AudioInputControllerTest, CreateAndClose) {
- MockAudioInputControllerEventHandler event_handler;
-
- // OnCreated() will be posted once.
- EXPECT_CALL(event_handler, OnCreated(NotNull()))
- .WillOnce(QuitMessageLoop(&message_loop_));
-
- scoped_ptr<AudioManager> audio_manager(AudioManager::Create());
- AudioParameters params(AudioParameters::AUDIO_FAKE, kChannelLayout,
- kSampleRate, kBitsPerSample, kSamplesPerPacket);
- scoped_refptr<AudioInputController> controller =
- AudioInputController::Create(audio_manager.get(), &event_handler, params);
- ASSERT_TRUE(controller.get());
-
- // Wait for OnCreated() to fire.
- message_loop_.Run();
-
- // Close the AudioInputController synchronously.
- CloseAudioController(controller);
-}
-
-// Test a normal call sequence of create, record and close.
-TEST_F(AudioInputControllerTest, RecordAndClose) {
- MockAudioInputControllerEventHandler event_handler;
- int count = 0;
-
- // OnCreated() will be called once.
- EXPECT_CALL(event_handler, OnCreated(NotNull()))
- .Times(Exactly(1));
-
- // OnRecording() will be called only once.
- EXPECT_CALL(event_handler, OnRecording(NotNull()))
- .Times(Exactly(1));
-
- // OnData() shall be called at least ten times.
- EXPECT_CALL(event_handler, OnData(NotNull(), NotNull(), _))
- .Times(AtLeast(10))
- .WillRepeatedly(CheckCountAndPostQuitTask(&count, 10,
- message_loop_.message_loop_proxy()));
-
- scoped_ptr<AudioManager> audio_manager(AudioManager::Create());
- AudioParameters params(AudioParameters::AUDIO_FAKE, kChannelLayout,
- kSampleRate, kBitsPerSample, kSamplesPerPacket);
-
- // Creating the AudioInputController should render an OnCreated() call.
- scoped_refptr<AudioInputController> controller =
- AudioInputController::Create(audio_manager.get(), &event_handler, params);
- ASSERT_TRUE(controller.get());
-
- // Start recording and trigger one OnRecording() call.
- controller->Record();
-
- // Record and wait until ten OnData() callbacks are received.
- message_loop_.Run();
-
- // Close the AudioInputController synchronously.
- CloseAudioController(controller);
-}
-
-// Test that the AudioInputController reports an error when the input stream
-// stops without an OnClose() callback. This can happen when the underlying
-// audio layer stops feeding data as a result of a removed microphone device.
-TEST_F(AudioInputControllerTest, RecordAndError) {
- MockAudioInputControllerEventHandler event_handler;
- int count = 0;
-
- // OnCreated() will be called once.
- EXPECT_CALL(event_handler, OnCreated(NotNull()))
- .Times(Exactly(1));
-
- // OnRecording() will be called only once.
- EXPECT_CALL(event_handler, OnRecording(NotNull()))
- .Times(Exactly(1));
-
- // OnData() shall be called at least ten times.
- EXPECT_CALL(event_handler, OnData(NotNull(), NotNull(), _))
- .Times(AtLeast(10))
- .WillRepeatedly(CheckCountAndPostQuitTask(&count, 10,
- message_loop_.message_loop_proxy()));
-
- // OnError() will be called after the data stream stops while the
- // controller is in a recording state.
- EXPECT_CALL(event_handler, OnError(NotNull(), 0))
- .Times(Exactly(1))
- .WillOnce(QuitMessageLoop(&message_loop_));
-
- scoped_ptr<AudioManager> audio_manager(AudioManager::Create());
- AudioParameters params(AudioParameters::AUDIO_FAKE, kChannelLayout,
- kSampleRate, kBitsPerSample, kSamplesPerPacket);
-
- // Creating the AudioInputController should render an OnCreated() call.
- scoped_refptr<AudioInputController> controller =
- AudioInputController::Create(audio_manager.get(), &event_handler, params);
- ASSERT_TRUE(controller.get());
-
- // Start recording and trigger one OnRecording() call.
- controller->Record();
-
- // Record and wait until ten OnData() callbacks are received.
- message_loop_.Run();
-
- // Stop the stream and verify that OnError() is posted.
- AudioInputStream* stream = controller->stream_for_testing();
- stream->Stop();
- message_loop_.Run();
-
- // Close the AudioInputController synchronously.
- CloseAudioController(controller);
-}
-
-// Test that AudioInputController rejects insanely large packet sizes.
-TEST_F(AudioInputControllerTest, SamplesPerPacketTooLarge) {
- // Create an audio device with a very large packet size.
- MockAudioInputControllerEventHandler event_handler;
-
- // OnCreated() shall not be called in this test.
- EXPECT_CALL(event_handler, OnCreated(NotNull()))
- .Times(Exactly(0));
-
- scoped_ptr<AudioManager> audio_manager(AudioManager::Create());
- AudioParameters params(AudioParameters::AUDIO_FAKE, kChannelLayout,
- kSampleRate, kBitsPerSample, kSamplesPerPacket * 1000);
- scoped_refptr<AudioInputController> controller =
- AudioInputController::Create(audio_manager.get(), &event_handler, params);
- ASSERT_FALSE(controller);
-}
-
-// Test calling AudioInputController::Close multiple times.
-TEST_F(AudioInputControllerTest, CloseTwice) {
- MockAudioInputControllerEventHandler event_handler;
-
- // OnCreated() will be called only once.
- EXPECT_CALL(event_handler, OnCreated(NotNull()));
-
- // OnRecording() will be called only once.
- EXPECT_CALL(event_handler, OnRecording(NotNull()))
- .Times(Exactly(1));
-
- scoped_ptr<AudioManager> audio_manager(AudioManager::Create());
- AudioParameters params(AudioParameters::AUDIO_FAKE, kChannelLayout,
- kSampleRate, kBitsPerSample, kSamplesPerPacket);
- scoped_refptr<AudioInputController> controller =
- AudioInputController::Create(audio_manager.get(), &event_handler, params);
- ASSERT_TRUE(controller.get());
-
- controller->Record();
-
- controller->Close(MessageLoop::QuitClosure());
- MessageLoop::current()->Run();
-
- controller->Close(MessageLoop::QuitClosure());
- MessageLoop::current()->Run();
-}
-
-} // namespace media
diff --git a/src/media/audio/audio_input_device.cc b/src/media/audio/audio_input_device.cc
deleted file mode 100644
index 9edf6db..0000000
--- a/src/media/audio/audio_input_device.cc
+++ /dev/null
@@ -1,344 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "media/audio/audio_input_device.h"
-
-#include "base/bind.h"
-#include "base/message_loop.h"
-#include "base/threading/thread_restrictions.h"
-#include "base/time.h"
-#include "media/audio/audio_manager_base.h"
-#include "media/base/audio_bus.h"
-
-namespace media {
-
-// Takes care of invoking the capture callback on the audio thread.
-// An instance of this class is created for each capture stream in
-// OnStreamCreated().
-class AudioInputDevice::AudioThreadCallback
- : public AudioDeviceThread::Callback {
- public:
- AudioThreadCallback(const AudioParameters& audio_parameters,
- base::SharedMemoryHandle memory,
- int memory_length,
- CaptureCallback* capture_callback);
- virtual ~AudioThreadCallback();
-
- virtual void MapSharedMemory() OVERRIDE;
-
- // Called whenever we receive notifications about pending data.
- virtual void Process(int pending_data) OVERRIDE;
-
- private:
- CaptureCallback* capture_callback_;
- scoped_ptr<AudioBus> audio_bus_;
- DISALLOW_COPY_AND_ASSIGN(AudioThreadCallback);
-};
-
-AudioInputDevice::AudioInputDevice(
- AudioInputIPC* ipc,
- const scoped_refptr<base::MessageLoopProxy>& io_loop)
- : ScopedLoopObserver(io_loop),
- callback_(NULL),
- event_handler_(NULL),
- ipc_(ipc),
- stream_id_(0),
- session_id_(0),
- pending_device_ready_(false),
- agc_is_enabled_(false) {
- CHECK(ipc_);
-}
-
-void AudioInputDevice::Initialize(const AudioParameters& params,
- CaptureCallback* callback,
- CaptureEventHandler* event_handler) {
- DCHECK(!callback_);
- DCHECK(!event_handler_);
- audio_parameters_ = params;
- callback_ = callback;
- event_handler_ = event_handler;
-}
-
-void AudioInputDevice::SetDevice(int session_id) {
- DVLOG(1) << "SetDevice (session_id=" << session_id << ")";
- message_loop()->PostTask(FROM_HERE,
- base::Bind(&AudioInputDevice::SetSessionIdOnIOThread, this, session_id));
-}
-
-void AudioInputDevice::Start() {
- DVLOG(1) << "Start()";
- message_loop()->PostTask(FROM_HERE,
- base::Bind(&AudioInputDevice::InitializeOnIOThread, this));
-}
-
-void AudioInputDevice::Stop() {
- DVLOG(1) << "Stop()";
-
- {
- base::AutoLock auto_lock(audio_thread_lock_);
- audio_thread_.Stop(MessageLoop::current());
- }
-
- message_loop()->PostTask(FROM_HERE,
- base::Bind(&AudioInputDevice::ShutDownOnIOThread, this));
-}
-
-void AudioInputDevice::SetVolume(double volume) {
- if (volume < 0 || volume > 1.0) {
- DLOG(ERROR) << "Invalid volume value specified";
- return;
- }
-
- message_loop()->PostTask(FROM_HERE,
- base::Bind(&AudioInputDevice::SetVolumeOnIOThread, this, volume));
-}
-
-void AudioInputDevice::SetAutomaticGainControl(bool enabled) {
- DVLOG(1) << "SetAutomaticGainControl(enabled=" << enabled << ")";
- message_loop()->PostTask(FROM_HERE,
- base::Bind(&AudioInputDevice::SetAutomaticGainControlOnIOThread,
- this, enabled));
-}
-
-void AudioInputDevice::OnStreamCreated(
- base::SharedMemoryHandle handle,
- base::SyncSocket::Handle socket_handle,
- int length) {
- DCHECK(message_loop()->BelongsToCurrentThread());
-#if defined(OS_WIN)
- DCHECK(handle);
- DCHECK(socket_handle);
-#elif defined(__LB_SHELL__) || defined(COBALT)
- DCHECK(handle.get());
-#else
- DCHECK_GE(handle.fd, 0);
- DCHECK_GE(socket_handle, 0);
-#endif
- DCHECK(length);
- DVLOG(1) << "OnStreamCreated (stream_id=" << stream_id_ << ")";
-
- // We should only get this callback if stream_id_ is valid. If it is not,
- // the IPC layer should have closed the shared memory and socket handles
- // for us and not invoked the callback. The basic assertion is that when
- // stream_id_ is 0 the AudioInputDevice instance is not registered as a
- // delegate and hence it should not receive callbacks.
- DCHECK(stream_id_);
-
- base::AutoLock auto_lock(audio_thread_lock_);
-
- DCHECK(audio_thread_.IsStopped());
- audio_callback_.reset(
- new AudioInputDevice::AudioThreadCallback(audio_parameters_, handle,
- length, callback_));
- audio_thread_.Start(audio_callback_.get(), socket_handle, "AudioInputDevice");
-
- MessageLoop::current()->PostTask(FROM_HERE,
- base::Bind(&AudioInputDevice::StartOnIOThread, this));
-}
-
-void AudioInputDevice::OnVolume(double volume) {
- NOTIMPLEMENTED();
-}
-
-void AudioInputDevice::OnStateChanged(
- AudioInputIPCDelegate::State state) {
- DCHECK(message_loop()->BelongsToCurrentThread());
-
- // Do nothing if the stream has been closed.
- if (!stream_id_)
- return;
-
- switch (state) {
- case AudioInputIPCDelegate::kStopped:
- // TODO(xians): Should we just call ShutDownOnIOThread here instead?
- ipc_->RemoveDelegate(stream_id_);
-
- audio_thread_.Stop(MessageLoop::current());
- audio_callback_.reset();
-
- if (event_handler_)
- event_handler_->OnDeviceStopped();
-
- stream_id_ = 0;
- pending_device_ready_ = false;
- break;
- case AudioInputIPCDelegate::kRecording:
- NOTIMPLEMENTED();
- break;
- case AudioInputIPCDelegate::kError:
- DLOG(WARNING) << "AudioInputDevice::OnStateChanged(kError)";
- // Don't dereference the callback object if the audio thread
- // is stopped or stopping. That could mean that the callback
- // object has been deleted.
- // TODO(tommi): Add an explicit contract for clearing the callback
- // object. Possibly require calling Initialize again or provide
- // a callback object via Start() and clear it in Stop().
- if (!audio_thread_.IsStopped())
- callback_->OnCaptureError();
- break;
- default:
- NOTREACHED();
- break;
- }
-}
-
-void AudioInputDevice::OnDeviceReady(const std::string& device_id) {
- DCHECK(message_loop()->BelongsToCurrentThread());
- DVLOG(1) << "OnDeviceReady (device_id=" << device_id << ")";
-
- // Takes care of the case when Stop() is called before OnDeviceReady().
- if (!pending_device_ready_)
- return;
-
- // If AudioInputDeviceManager returns an empty string, it means no device
- // is ready for start.
- if (device_id.empty()) {
- ipc_->RemoveDelegate(stream_id_);
- stream_id_ = 0;
- } else {
- ipc_->CreateStream(stream_id_, audio_parameters_, device_id,
- agc_is_enabled_);
- }
-
- pending_device_ready_ = false;
- // Notify the client that the device has been started.
- if (event_handler_)
- event_handler_->OnDeviceStarted(device_id);
-}
-
-void AudioInputDevice::OnIPCClosed() {
- ipc_ = NULL;
-}
-
-AudioInputDevice::~AudioInputDevice() {
- // TODO(henrika): The current design requires that the user calls
- // Stop before deleting this class.
- CHECK_EQ(0, stream_id_);
-}
-
-void AudioInputDevice::InitializeOnIOThread() {
- DCHECK(message_loop()->BelongsToCurrentThread());
- // Make sure we don't call Start() more than once.
- DCHECK_EQ(0, stream_id_);
- if (stream_id_)
- return;
-
- stream_id_ = ipc_->AddDelegate(this);
- // If |session_id_| is not specified, it will directly create the stream;
- // otherwise it will send an AudioInputHostMsg_StartDevice msg to the browser
- // and create the stream when getting an OnDeviceReady() callback.
- if (!session_id_) {
- ipc_->CreateStream(stream_id_, audio_parameters_,
- AudioManagerBase::kDefaultDeviceId, agc_is_enabled_);
- } else {
- ipc_->StartDevice(stream_id_, session_id_);
- pending_device_ready_ = true;
- }
-}
-
-void AudioInputDevice::SetSessionIdOnIOThread(int session_id) {
- DCHECK(message_loop()->BelongsToCurrentThread());
- session_id_ = session_id;
-}
-
-void AudioInputDevice::StartOnIOThread() {
- DCHECK(message_loop()->BelongsToCurrentThread());
- if (stream_id_)
- ipc_->RecordStream(stream_id_);
-}
-
-void AudioInputDevice::ShutDownOnIOThread() {
- DCHECK(message_loop()->BelongsToCurrentThread());
- // Make sure we don't call shutdown more than once.
- if (stream_id_) {
- if (ipc_) {
- ipc_->CloseStream(stream_id_);
- ipc_->RemoveDelegate(stream_id_);
- }
-
- stream_id_ = 0;
- session_id_ = 0;
- pending_device_ready_ = false;
- agc_is_enabled_ = false;
- }
-
- // We can run into an issue where ShutDownOnIOThread is called right after
- // OnStreamCreated is called in cases where Start/Stop are called before we
- // get the OnStreamCreated callback. To handle that corner case, we call
- // Stop(). In most cases, the thread will already be stopped.
- // Another situation is when the IO thread goes away before Stop() is called
- // in which case, we cannot use the message loop to close the thread handle
- // and cannot rely on the main thread existing either.
- base::ThreadRestrictions::ScopedAllowIO allow_io;
- audio_thread_.Stop(NULL);
- audio_callback_.reset();
-}
-
-void AudioInputDevice::SetVolumeOnIOThread(double volume) {
- DCHECK(message_loop()->BelongsToCurrentThread());
- if (stream_id_)
- ipc_->SetVolume(stream_id_, volume);
-}
-
-void AudioInputDevice::SetAutomaticGainControlOnIOThread(bool enabled) {
- DCHECK(message_loop()->BelongsToCurrentThread());
- DCHECK_EQ(0, stream_id_) <<
- "The AGC state can not be modified while capturing is active.";
- if (stream_id_)
- return;
-
- // We simply store the new AGC setting here. This value will be used when
- // a new stream is initialized and by GetAutomaticGainControl().
- agc_is_enabled_ = enabled;
-}
-
-void AudioInputDevice::WillDestroyCurrentMessageLoop() {
- LOG(ERROR) << "IO loop going away before the input device has been stopped";
- ShutDownOnIOThread();
-}
-
-// AudioInputDevice::AudioThreadCallback
-AudioInputDevice::AudioThreadCallback::AudioThreadCallback(
- const AudioParameters& audio_parameters,
- base::SharedMemoryHandle memory,
- int memory_length,
- CaptureCallback* capture_callback)
- : AudioDeviceThread::Callback(audio_parameters, 0, memory, memory_length),
- capture_callback_(capture_callback) {
- audio_bus_ = AudioBus::Create(audio_parameters_);
-}
-
-AudioInputDevice::AudioThreadCallback::~AudioThreadCallback() {
-}
-
-void AudioInputDevice::AudioThreadCallback::MapSharedMemory() {
- shared_memory_.Map(memory_length_);
-}
-
-void AudioInputDevice::AudioThreadCallback::Process(int pending_data) {
- // The shared memory represents parameters, size of the data buffer and the
- // actual data buffer containing audio data. Map the memory into this
- // structure and parse out parameters and the data area.
- AudioInputBuffer* buffer =
- reinterpret_cast<AudioInputBuffer*>(shared_memory_.memory());
- DCHECK_EQ(buffer->params.size,
- memory_length_ - sizeof(AudioInputBufferParameters));
- double volume = buffer->params.volume;
-
- int audio_delay_milliseconds = pending_data / bytes_per_ms_;
- int16* memory = reinterpret_cast<int16*>(&buffer->audio[0]);
- const int bytes_per_sample = sizeof(memory[0]);
-
- // Deinterleave each channel and convert to 32-bit floating-point
- // with nominal range -1.0 -> +1.0.
- audio_bus_->FromInterleaved(memory, audio_bus_->frames(), bytes_per_sample);
-
- // Deliver captured data to the client in floating point format
- // and update the audio-delay measurement.
- capture_callback_->Capture(audio_bus_.get(),
- audio_delay_milliseconds, volume);
-}
-
-} // namespace media
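For readers following the capture path above, the key step in AudioThreadCallback::Process() is converting the interleaved 16-bit samples in shared memory into a planar floating-point AudioBus. The sketch below (not part of the patch) illustrates what that deinterleave/normalize step amounts to; the hand-rolled loop and the 1/32768 scale factor are assumptions of the sketch, not the actual AudioBus::FromInterleaved() implementation.

#include <stdint.h>
#include <vector>

// Deinterleaves |frames| frames of interleaved 16-bit PCM into one float
// vector per channel, scaling each sample to the nominal range [-1.0, 1.0].
std::vector<std::vector<float> > DeinterleaveToFloat(const int16_t* interleaved,
                                                     int frames,
                                                     int channels) {
  std::vector<std::vector<float> > planar(channels,
                                          std::vector<float>(frames));
  for (int frame = 0; frame < frames; ++frame) {
    for (int ch = 0; ch < channels; ++ch) {
      // Assumed scale: int16 full scale maps to +/-1.0.
      planar[ch][frame] = interleaved[frame * channels + ch] / 32768.0f;
    }
  }
  return planar;
}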
diff --git a/src/media/audio/audio_input_device.h b/src/media/audio/audio_input_device.h
deleted file mode 100644
index edefdf1..0000000
--- a/src/media/audio/audio_input_device.h
+++ /dev/null
@@ -1,170 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// Low-latency audio capturing class that utilizes an audio input stream
-// provided by a server (browser) process via an IPC interface.
-//
-// Relationship of classes:
-//
-// AudioInputController AudioInputDevice
-// ^ ^
-// | |
-// v IPC v
-// AudioInputRendererHost <---------> AudioInputIPCDelegate
-// ^ (impl in AudioInputMessageFilter)
-// |
-// v
-// AudioInputDeviceManager
-//
-// Transportation of audio samples from the browser to the render process
-// is done by using shared memory in combination with a SyncSocket.
-// The AudioInputDevice user registers an AudioInputDevice::CaptureCallback by
-// calling Initialize(). The callback will be called with recorded audio from
-// the underlying audio layers.
-// The session ID is used by the AudioInputRendererHost to start the device
-// referenced by this ID.
-//
-// State sequences:
-//
-// Sequence where session_id has not been set using SetDevice():
-// ('<-' signifies callbacks, '->' signifies calls made by AudioInputDevice)
-// Start -> InitializeOnIOThread -> CreateStream ->
-// <- OnStreamCreated <-
-// -> StartOnIOThread -> RecordStream ->
-//
-// Sequence where session_id has been set using SetDevice():
-// Start -> InitializeOnIOThread -> StartDevice ->
-// <- OnDeviceReady <-
-// -> CreateStream ->
-// <- OnStreamCreated <-
-// -> StartOnIOThread -> RecordStream ->
-//
-// AudioInputDevice::Capture => low latency audio transport on audio thread =>
-// |
-// Stop --> ShutDownOnIOThread ------> CloseStream -> Close
-//
-// This class depends on two threads to function:
-//
-// 1. An IO thread.
-// This thread is used to asynchronously process Start/Stop etc operations
-// that are available via the public interface. The public methods are
-// asynchronous and simply post a task to the IO thread to actually perform
-// the work.
-// 2. Audio transport thread.
-// Responsible for calling the CaptureCallback and feeding audio samples from
-// the server-side audio layer using a socket and shared memory.
-//
-// Implementation notes:
-// - The user must call Stop() before deleting the class instance.
-
-#ifndef MEDIA_AUDIO_AUDIO_INPUT_DEVICE_H_
-#define MEDIA_AUDIO_AUDIO_INPUT_DEVICE_H_
-
-#include <string>
-#include <vector>
-
-#include "base/basictypes.h"
-#include "base/compiler_specific.h"
-#include "base/memory/scoped_ptr.h"
-#include "base/shared_memory.h"
-#include "media/audio/audio_device_thread.h"
-#include "media/audio/audio_input_ipc.h"
-#include "media/audio/audio_parameters.h"
-#include "media/audio/scoped_loop_observer.h"
-#include "media/base/audio_capturer_source.h"
-#include "media/base/media_export.h"
-
-namespace media {
-
-// TODO(henrika): This class is based on the AudioOutputDevice class and it has
-// many components in common. Investigate potential for re-factoring.
-// TODO(henrika): Add support for event handling (e.g. OnStateChanged,
-// OnCaptureStopped etc.) and ensure that we can deliver these notifications
-// to any clients using this class.
-class MEDIA_EXPORT AudioInputDevice
- : NON_EXPORTED_BASE(public AudioCapturerSource),
- NON_EXPORTED_BASE(public AudioInputIPCDelegate),
- NON_EXPORTED_BASE(public ScopedLoopObserver) {
- public:
- AudioInputDevice(AudioInputIPC* ipc,
- const scoped_refptr<base::MessageLoopProxy>& io_loop);
-
- // AudioCapturerSource implementation.
- virtual void Initialize(const AudioParameters& params,
- CaptureCallback* callback,
- CaptureEventHandler* event_handler) OVERRIDE;
- virtual void Start() OVERRIDE;
- virtual void Stop() OVERRIDE;
- virtual void SetVolume(double volume) OVERRIDE;
- virtual void SetDevice(int session_id) OVERRIDE;
- virtual void SetAutomaticGainControl(bool enabled) OVERRIDE;
-
- protected:
- // Methods called on IO thread ----------------------------------------------
- // AudioInputIPCDelegate implementation.
- virtual void OnStreamCreated(base::SharedMemoryHandle handle,
- base::SyncSocket::Handle socket_handle,
- int length) OVERRIDE;
- virtual void OnVolume(double volume) OVERRIDE;
- virtual void OnStateChanged(
- AudioInputIPCDelegate::State state) OVERRIDE;
- virtual void OnDeviceReady(const std::string& device_id) OVERRIDE;
- virtual void OnIPCClosed() OVERRIDE;
-
- friend class base::RefCountedThreadSafe<AudioInputDevice>;
- virtual ~AudioInputDevice();
-
- private:
- // Methods called on IO thread ----------------------------------------------
- // The following methods are tasks posted on the IO thread that need to
- // be executed on that thread. They interact with AudioInputMessageFilter and
- // send IPC messages on that thread.
- void InitializeOnIOThread();
- void SetSessionIdOnIOThread(int session_id);
- void StartOnIOThread();
- void ShutDownOnIOThread();
- void SetVolumeOnIOThread(double volume);
- void SetAutomaticGainControlOnIOThread(bool enabled);
-
- // MessageLoop::DestructionObserver implementation for the IO loop.
- // If the IO loop dies before we do, we shut down the audio thread from here.
- virtual void WillDestroyCurrentMessageLoop() OVERRIDE;
-
- AudioParameters audio_parameters_;
-
- CaptureCallback* callback_;
- CaptureEventHandler* event_handler_;
-
- AudioInputIPC* ipc_;
-
- // Our stream ID on the message filter. Only modified on the IO thread.
- int stream_id_;
-
- // The media session ID used to identify which input device should be started.
- // Only modified on the IO thread.
- int session_id_;
-
- // State variable used to indicate it is waiting for an OnDeviceReady()
- // callback. Only modified on the IO thread.
- bool pending_device_ready_;
-
- // Stores the Automatic Gain Control state. Default is false.
- // Only modified on the IO thread.
- bool agc_is_enabled_;
-
- // Our audio thread callback class. See source file for details.
- class AudioThreadCallback;
-
- // In order to avoid a race between OnStreamCreated and Stop(), we use this
- // guard to control stopping and starting the audio thread.
- base::Lock audio_thread_lock_;
- AudioDeviceThread audio_thread_;
- scoped_ptr<AudioInputDevice::AudioThreadCallback> audio_callback_;
-
- DISALLOW_IMPLICIT_CONSTRUCTORS(AudioInputDevice);
-};
-
-} // namespace media
-
-#endif // MEDIA_AUDIO_AUDIO_INPUT_DEVICE_H_
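As a rough illustration of the contract described in the header comment above, here is a minimal sketch (not part of the patch) of a client implementing the capture callbacks and driving AudioInputDevice. The CaptureCallback/CaptureEventHandler method signatures are inferred from the calls made in audio_input_device.cc; the real declarations live in media/base/audio_capturer_source.h, which is not shown in this diff, so treat every name here as an assumption.

#include <string>

#include "base/memory/ref_counted.h"
#include "media/audio/audio_input_device.h"
#include "media/base/audio_bus.h"
#include "media/base/audio_capturer_source.h"

// Hypothetical client; the nested-class inheritance is assumed from the
// unqualified CaptureCallback/CaptureEventHandler names used in the header.
class SketchCaptureClient
    : public media::AudioCapturerSource::CaptureCallback,
      public media::AudioCapturerSource::CaptureEventHandler {
 public:
  // Runs on the audio transport thread; must return quickly.
  virtual void Capture(media::AudioBus* audio_bus,
                       int audio_delay_milliseconds,
                       double volume) OVERRIDE {
    // Consume |audio_bus| here (e.g. hand it to an encoder queue).
  }
  virtual void OnCaptureError() OVERRIDE {}
  virtual void OnDeviceStarted(const std::string& device_id) OVERRIDE {}
  virtual void OnDeviceStopped() OVERRIDE {}
};

// Expected call sequence, assuming |ipc| and |io_loop| come from the embedder:
//   scoped_refptr<media::AudioInputDevice> device(
//       new media::AudioInputDevice(ipc, io_loop));
//   device->Initialize(params, &client, &client);
//   device->SetDevice(session_id);  // Optional; selects a specific device.
//   device->Start();
//   ...
//   device->Stop();  // Must be called before the last reference is dropped.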
diff --git a/src/media/audio/audio_input_device_unittest.cc b/src/media/audio/audio_input_device_unittest.cc
deleted file mode 100644
index dc211a4..0000000
--- a/src/media/audio/audio_input_device_unittest.cc
+++ /dev/null
@@ -1,199 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/environment.h"
-#include "base/logging.h"
-#include "base/memory/scoped_ptr.h"
-#include "media/audio/audio_manager.h"
-#include "media/audio/audio_manager_base.h"
-#include "testing/gtest/include/gtest/gtest.h"
-
-#if defined(OS_WIN)
-#include "base/win/scoped_com_initializer.h"
-#include "media/audio/win/audio_manager_win.h"
-#include "media/audio/win/wavein_input_win.h"
-#endif
-
-namespace media {
-
-// Test fixture which allows us to override the default enumeration API on
-// Windows.
-class AudioInputDeviceTest
- : public ::testing::Test {
- protected:
- AudioInputDeviceTest()
- : audio_manager_(AudioManager::Create())
-#if defined(OS_WIN)
- , com_init_(base::win::ScopedCOMInitializer::kMTA)
-#endif
- {
- }
-
-#if defined(OS_WIN)
- bool SetMMDeviceEnumeration() {
- AudioManagerWin* amw = static_cast<AudioManagerWin*>(audio_manager_.get());
- // Windows Wave is used as default if Windows XP was detected =>
- // return false since MMDevice is not supported on XP.
- if (amw->enumeration_type() == AudioManagerWin::kWaveEnumeration)
- return false;
-
- amw->SetEnumerationType(AudioManagerWin::kMMDeviceEnumeration);
- return true;
- }
-
- void SetWaveEnumeration() {
- AudioManagerWin* amw = static_cast<AudioManagerWin*>(audio_manager_.get());
- amw->SetEnumerationType(AudioManagerWin::kWaveEnumeration);
- }
-
- std::string GetDeviceIdFromPCMWaveInAudioInputStream(
- const std::string& device_id) {
- AudioManagerWin* amw = static_cast<AudioManagerWin*>(audio_manager_.get());
- AudioParameters parameters(
- AudioParameters::AUDIO_PCM_LINEAR, CHANNEL_LAYOUT_STEREO,
- AudioParameters::kAudioCDSampleRate, 16,
- 1024);
- scoped_ptr<PCMWaveInAudioInputStream> stream(
- static_cast<PCMWaveInAudioInputStream*>(
- amw->CreatePCMWaveInAudioInputStream(parameters, device_id)));
- return stream.get() ? stream->device_id_ : std::string();
- }
-#endif
-
- // Helper method which verifies that the device list starts with a valid
- // default record followed by non-default device names.
- static void CheckDeviceNames(const AudioDeviceNames& device_names) {
- if (!device_names.empty()) {
- AudioDeviceNames::const_iterator it = device_names.begin();
-
- // The first device in the list should always be the default device.
- EXPECT_EQ(std::string(AudioManagerBase::kDefaultDeviceName),
- it->device_name);
- EXPECT_EQ(std::string(AudioManagerBase::kDefaultDeviceId), it->unique_id);
- ++it;
-
- // Other devices should have non-empty name and id and should not contain
- // default name or id.
- while (it != device_names.end()) {
- EXPECT_FALSE(it->device_name.empty());
- EXPECT_FALSE(it->unique_id.empty());
- EXPECT_NE(std::string(AudioManagerBase::kDefaultDeviceName),
- it->device_name);
- EXPECT_NE(std::string(AudioManagerBase::kDefaultDeviceId),
- it->unique_id);
- ++it;
- }
- } else {
- // Log a warning so we can see the status on the build bots. No need to
- // break the test though since this does successfully test the code and
- // some failure cases.
- LOG(WARNING) << "No input devices detected";
- }
- }
-
- bool CanRunAudioTest() {
- return audio_manager_->HasAudioInputDevices();
- }
-
- scoped_ptr<AudioManager> audio_manager_;
-
-#if defined(OS_WIN)
- // The MMDevice API requires COM to be initialized on the current thread.
- base::win::ScopedCOMInitializer com_init_;
-#endif
-};
-
-// Test that devices can be enumerated.
-TEST_F(AudioInputDeviceTest, EnumerateDevices) {
- if (!CanRunAudioTest())
- return;
-
- AudioDeviceNames device_names;
- audio_manager_->GetAudioInputDeviceNames(&device_names);
- CheckDeviceNames(device_names);
-}
-
-// Run additional tests for Windows since enumeration can be done using
-// two different APIs. MMDevice is default for Vista and higher and Wave
-// is default for XP and lower.
-#if defined(OS_WIN)
-
-// Override default enumeration API and force usage of Windows MMDevice.
-// This test will only run on Windows Vista and higher.
-TEST_F(AudioInputDeviceTest, EnumerateDevicesWinMMDevice) {
- if (!CanRunAudioTest())
- return;
-
- AudioDeviceNames device_names;
- if (!SetMMDeviceEnumeration()) {
- // Usage of MMDevice will fail on XP and lower.
- LOG(WARNING) << "MM device enumeration is not supported.";
- return;
- }
- audio_manager_->GetAudioInputDeviceNames(&device_names);
- CheckDeviceNames(device_names);
-}
-
-// Override default enumeration API and force usage of Windows Wave.
-// This test will run on Windows XP, Windows Vista and Windows 7.
-TEST_F(AudioInputDeviceTest, EnumerateDevicesWinWave) {
- if (!CanRunAudioTest())
- return;
-
- AudioDeviceNames device_names;
- SetWaveEnumeration();
- audio_manager_->GetAudioInputDeviceNames(&device_names);
- CheckDeviceNames(device_names);
-}
-
-TEST_F(AudioInputDeviceTest, WinXPDeviceIdUnchanged) {
- if (!CanRunAudioTest())
- return;
-
- AudioDeviceNames xp_device_names;
- SetWaveEnumeration();
- audio_manager_->GetAudioInputDeviceNames(&xp_device_names);
- CheckDeviceNames(xp_device_names);
-
- // Device ID should remain unchanged, including the default device ID.
- for (AudioDeviceNames::iterator i = xp_device_names.begin();
- i != xp_device_names.end(); ++i) {
- EXPECT_EQ(i->unique_id,
- GetDeviceIdFromPCMWaveInAudioInputStream(i->unique_id));
- }
-}
-
-TEST_F(AudioInputDeviceTest, ConvertToWinXPDeviceId) {
- if (!CanRunAudioTest())
- return;
-
- if (!SetMMDeviceEnumeration()) {
- // Usage of MMDevice will fail on XP and lower.
- LOG(WARNING) << "MM device enumeration is not supported.";
- return;
- }
-
- AudioDeviceNames device_names;
- audio_manager_->GetAudioInputDeviceNames(&device_names);
- CheckDeviceNames(device_names);
-
- for (AudioDeviceNames::iterator i = device_names.begin();
- i != device_names.end(); ++i) {
- std::string converted_id =
- GetDeviceIdFromPCMWaveInAudioInputStream(i->unique_id);
- if (i == device_names.begin()) {
- // The first in the list is the default device ID, which should not be
- // changed when passed to PCMWaveInAudioInputStream.
- EXPECT_EQ(i->unique_id, converted_id);
- } else {
- // MMDevice-style device IDs should be converted to WaveIn-style device
- // IDs.
- EXPECT_NE(i->unique_id, converted_id);
- }
- }
-}
-
-#endif
-
-} // namespace media
diff --git a/src/media/audio/audio_input_ipc.cc b/src/media/audio/audio_input_ipc.cc
deleted file mode 100644
index 69253b0..0000000
--- a/src/media/audio/audio_input_ipc.cc
+++ /dev/null
@@ -1,13 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "media/audio/audio_input_ipc.h"
-
-namespace media {
-
-AudioInputIPCDelegate::~AudioInputIPCDelegate() {}
-
-AudioInputIPC::~AudioInputIPC() {}
-
-} // namespace media
diff --git a/src/media/audio/audio_input_ipc.h b/src/media/audio/audio_input_ipc.h
deleted file mode 100644
index eb4e72d..0000000
--- a/src/media/audio/audio_input_ipc.h
+++ /dev/null
@@ -1,104 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef MEDIA_AUDIO_AUDIO_INPUT_IPC_H_
-#define MEDIA_AUDIO_AUDIO_INPUT_IPC_H_
-
-#include "base/shared_memory.h"
-#include "base/sync_socket.h"
-#include "media/audio/audio_parameters.h"
-#include "media/base/media_export.h"
-
-namespace media {
-
-// Contains IPC notifications for the state of the server side
-// (AudioInputController) audio state changes and when an AudioInputController
-// has been created. Implemented by AudioInputDevice.
-class MEDIA_EXPORT AudioInputIPCDelegate {
- public:
- // Valid states for the input stream.
- enum State {
- kRecording,
- kStopped,
- kError
- };
-
- // Called when an AudioInputController has been created.
- // The shared memory |handle| points to a memory section that's used to
- // transfer data between the AudioInputDevice and AudioInputController
- // objects. The implementation of OnStreamCreated takes ownership.
- // The |socket_handle| is used by the AudioInputController to signal
- // notifications that more data is available and can optionally provide
- // parameter changes back. The AudioInputDevice must read from this socket
- // and process the shared memory whenever data is read from the socket.
- virtual void OnStreamCreated(base::SharedMemoryHandle handle,
- base::SyncSocket::Handle socket_handle,
- int length) = 0;
-
- // Called when state of an audio stream has changed.
- virtual void OnStateChanged(State state) = 0;
-
- // Called when the input stream volume has changed.
- virtual void OnVolume(double volume) = 0;
-
- // Called when a device has been started on the server side.
- // If the device could not be started, |device_id| will be empty.
- virtual void OnDeviceReady(const std::string& device_id) = 0;
-
- // Called when the AudioInputIPC object is going away and/or when the
- // IPC channel has been closed and no more IPC requests can be made.
- // Implementations must clear any references to the AudioInputIPC object
- // at this point.
- virtual void OnIPCClosed() = 0;
-
- protected:
- virtual ~AudioInputIPCDelegate();
-};
-
-// Provides IPC functionality for an AudioInputDevice. The implementation
-// should asynchronously deliver the messages to an AudioInputController object
-// (or create one in the case of CreateStream()), that may live in a separate
-// process.
-class MEDIA_EXPORT AudioInputIPC {
- public:
- // Registers an AudioInputIPCDelegate and returns a |stream_id| that
- // must be used with all other IPC functions in this interface.
- virtual int AddDelegate(AudioInputIPCDelegate* delegate) = 0;
-
- // Unregisters a delegate that was previously registered via a call to
- // AddDelegate(). The audio stream should be in a closed state prior to
- // calling this function.
- virtual void RemoveDelegate(int stream_id) = 0;
-
- // Sends a request to create an AudioInputController object in the peer
- // process, identified by |stream_id| and configured to use the specified
- // audio |params|. Once the stream has been created, the implementation must
- // generate a notification to the AudioInputIPCDelegate and call
- // OnStreamCreated().
- virtual void CreateStream(int stream_id, const AudioParameters& params,
- const std::string& device_id, bool automatic_gain_control) = 0;
-
- // Starts the device on the server side. Once the device has started,
- // or failed to start, a callback to
- // AudioInputIPCDelegate::OnDeviceReady() must be made.
- virtual void StartDevice(int stream_id, int session_id) = 0;
-
- // Corresponds to a call to AudioInputController::Record() on the server side.
- virtual void RecordStream(int stream_id) = 0;
-
- // Sets the volume of the audio stream.
- virtual void SetVolume(int stream_id, double volume) = 0;
-
- // Closes the audio stream and deletes the matching AudioInputController
- // instance. Prior to deleting the AudioInputController object, a call to
- // AudioInputController::Close must be made.
- virtual void CloseStream(int stream_id) = 0;
-
- protected:
- virtual ~AudioInputIPC();
-};
-
-} // namespace media
-
-#endif // MEDIA_AUDIO_AUDIO_INPUT_IPC_H_
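Since the whole AudioInputIPC interface is visible above, a hedged sketch of a trivial in-process stub may help show what an implementation has to provide. Something along these lines (not part of the patch, and purely hypothetical) could stand in for the real IPC layer in unit tests of AudioInputDevice; every signature is copied from the removed header.

#include <string>

#include "media/audio/audio_input_ipc.h"

namespace media {

// Hypothetical test stub; name and behavior are assumptions of this sketch.
class FakeAudioInputIPC : public AudioInputIPC {
 public:
  FakeAudioInputIPC() : delegate_(NULL), next_stream_id_(1) {}

  virtual int AddDelegate(AudioInputIPCDelegate* delegate) OVERRIDE {
    delegate_ = delegate;
    return next_stream_id_++;
  }
  virtual void RemoveDelegate(int stream_id) OVERRIDE { delegate_ = NULL; }
  virtual void CreateStream(int stream_id, const AudioParameters& params,
                            const std::string& device_id,
                            bool automatic_gain_control) OVERRIDE {
    // A real implementation would notify the delegate asynchronously via
    // OnStreamCreated() once the server-side AudioInputController exists.
  }
  virtual void StartDevice(int stream_id, int session_id) OVERRIDE {}
  virtual void RecordStream(int stream_id) OVERRIDE {}
  virtual void SetVolume(int stream_id, double volume) OVERRIDE {}
  virtual void CloseStream(int stream_id) OVERRIDE {}

 private:
  AudioInputIPCDelegate* delegate_;
  int next_stream_id_;
};

}  // namespace media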
diff --git a/src/media/audio/audio_input_stream_impl.cc b/src/media/audio/audio_input_stream_impl.cc
deleted file mode 100644
index f68317c..0000000
--- a/src/media/audio/audio_input_stream_impl.cc
+++ /dev/null
@@ -1,71 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/logging.h"
-#include "media/audio/audio_input_stream_impl.h"
-
-namespace media {
-
-static const int kMinIntervalBetweenVolumeUpdatesMs = 1000;
-
-AudioInputStreamImpl::AudioInputStreamImpl()
- : agc_is_enabled_(false),
- max_volume_(0.0),
- normalized_volume_(0.0) {
-}
-
-AudioInputStreamImpl::~AudioInputStreamImpl() {}
-
-void AudioInputStreamImpl::SetAutomaticGainControl(bool enabled) {
- agc_is_enabled_ = enabled;
-}
-
-bool AudioInputStreamImpl::GetAutomaticGainControl() {
- return agc_is_enabled_;
-}
-
-void AudioInputStreamImpl::UpdateAgcVolume() {
- base::AutoLock lock(lock_);
-
- // We take new volume samples once every second when the AGC is enabled.
- // To ensure that a new setting has an immediate effect, the new volume
- // setting is cached here. It will ensure that the next OnData() callback
- // will contain a new valid volume level. If this approach was not taken,
- // we could report invalid volume levels to the client for a time period
- // of up to one second.
- if (agc_is_enabled_) {
- GetNormalizedVolume();
- }
-}
-
-void AudioInputStreamImpl::QueryAgcVolume(double* normalized_volume) {
- base::AutoLock lock(lock_);
-
- // Only modify the |normalized_volume| output reference if AGC is enabled and
- // if more than one second has passed since the volume was last updated.
- if (agc_is_enabled_) {
- base::Time now = base::Time::Now();
- if ((now - last_volume_update_time_).InMilliseconds() >
- kMinIntervalBetweenVolumeUpdatesMs) {
- GetNormalizedVolume();
- last_volume_update_time_ = now;
- }
- *normalized_volume = normalized_volume_;
- }
-}
-
-void AudioInputStreamImpl::GetNormalizedVolume() {
- if (max_volume_ == 0.0) {
- // Cache the maximum volume if this is the first time we ask for it.
- max_volume_ = GetMaxVolume();
- }
-
- if (max_volume_ != 0.0) {
- // Retrieve the current volume level by asking the audio hardware.
- // Range is normalized to [0.0,1.0] or [0.0, 1.5] on Linux.
- normalized_volume_ = GetVolume() / max_volume_;
- }
-}
-
-} // namespace media
diff --git a/src/media/audio/audio_input_stream_impl.h b/src/media/audio/audio_input_stream_impl.h
deleted file mode 100644
index 64980a9..0000000
--- a/src/media/audio/audio_input_stream_impl.h
+++ /dev/null
@@ -1,71 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef MEDIA_AUDIO_AUDIO_INPUT_STREAM_IMPL_H_
-#define MEDIA_AUDIO_AUDIO_INPUT_STREAM_IMPL_H_
-
-#include "base/compiler_specific.h"
-#include "base/synchronization/lock.h"
-#include "base/time.h"
-#include "media/audio/audio_io.h"
-
-namespace media {
-
-// AudioInputStreamImpl implements platform-independent parts of the
-// AudioInputStream interface. Each platform dependent implementation
-// should derive from this class.
-// TODO(henrika): we can probably break out more parts from our current
-// AudioInputStream implementation and move them into this class.
-class MEDIA_EXPORT AudioInputStreamImpl : public AudioInputStream {
- public:
- AudioInputStreamImpl();
- virtual ~AudioInputStreamImpl();
-
- // Sets the automatic gain control (AGC) to on or off. When AGC is enabled,
- // the microphone volume is queried periodically and the volume level is
- // provided in each AudioInputCallback::OnData() callback and fed to the
- // render-side AGC.
- virtual void SetAutomaticGainControl(bool enabled) OVERRIDE;
-
- // Gets the current automatic gain control state.
- virtual bool GetAutomaticGainControl() OVERRIDE;
-
- protected:
- // Stores a new volume level by asking the audio hardware.
- // This method only has an effect if AGC is enabled.
- void UpdateAgcVolume();
-
- // Gets the latest stored volume level if AGC is enabled and if
- // more than one second has passed since the volume was updated the last time.
- void QueryAgcVolume(double* normalized_volume);
-
- private:
- // Takes a volume sample and stores it in |normalized_volume_|.
- void GetNormalizedVolume();
-
- // True when automatic gain control is enabled, false otherwise.
- // Guarded by |lock_|.
- bool agc_is_enabled_;
-
- // Stores the maximum volume which is used for normalization to a volume
- // range of [0.0, 1.0].
- double max_volume_;
-
- // Contains the last result of an internal call to GetVolume(). We save resources
- // by not querying the capture volume for each callback. Guarded by |lock_|.
- // The range is normalized to [0.0, 1.0].
- double normalized_volume_;
-
- // Protects |agc_is_enabled_| and |normalized_volume_|.
- base::Lock lock_;
-
- // Keeps track of the last time the microphone volume level was queried.
- base::Time last_volume_update_time_;
-
- DISALLOW_COPY_AND_ASSIGN(AudioInputStreamImpl);
-};
-
-} // namespace media
-
-#endif // MEDIA_AUDIO_AUDIO_INPUT_STREAM_IMPL_H_
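To make the division of labour concrete, the sketch below (not part of the patch) shows a hypothetical platform stream deriving from AudioInputStreamImpl: the platform code supplies GetVolume()/GetMaxVolume() and calls QueryAgcVolume() when delivering data, while the base class performs the once-per-second normalization implemented in audio_input_stream_impl.cc. All names other than the base-class and AudioInputStream methods are assumptions.

#include "media/audio/audio_input_stream_impl.h"

// Hypothetical platform-specific stream used only for illustration.
class FakePlatformAudioInputStream : public media::AudioInputStreamImpl {
 public:
  FakePlatformAudioInputStream() : callback_(NULL), volume_(0.5) {}
  virtual ~FakePlatformAudioInputStream() {}

  // AudioInputStream implementation (trivial stand-ins for platform code).
  virtual bool Open() OVERRIDE { return true; }
  virtual void Start(AudioInputCallback* callback) OVERRIDE {
    callback_ = callback;
  }
  virtual void Stop() OVERRIDE { callback_ = NULL; }
  virtual void Close() OVERRIDE { delete this; }
  virtual double GetMaxVolume() OVERRIDE { return 1.0; }
  virtual void SetVolume(double volume) OVERRIDE { volume_ = volume; }
  virtual double GetVolume() OVERRIDE { return volume_; }

  // Would be called from the platform capture thread with fresh samples.
  void DeliverData(const uint8* data, uint32 size, uint32 hardware_delay) {
    double normalized_volume = 0.0;
    QueryAgcVolume(&normalized_volume);  // Stays 0.0 unless AGC is enabled.
    if (callback_)
      callback_->OnData(this, data, size, hardware_delay, normalized_volume);
  }

 private:
  AudioInputCallback* callback_;
  double volume_;
};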
diff --git a/src/media/audio/audio_input_unittest.cc b/src/media/audio/audio_input_unittest.cc
deleted file mode 100644
index 5a02323..0000000
--- a/src/media/audio/audio_input_unittest.cc
+++ /dev/null
@@ -1,164 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/basictypes.h"
-#include "base/environment.h"
-#include "base/memory/scoped_ptr.h"
-#include "base/message_loop.h"
-#include "base/threading/platform_thread.h"
-#include "media/audio/audio_io.h"
-#include "media/audio/audio_manager_base.h"
-#include "testing/gtest/include/gtest/gtest.h"
-
-namespace media {
-
-static const int kSamplingRate = 8000;
-static const int kSamplesPerPacket = kSamplingRate / 20;
-
-// This class allows us to find out if the callbacks are occurring as
-// expected and if any error has been reported.
-class TestInputCallback : public AudioInputStream::AudioInputCallback {
- public:
- explicit TestInputCallback(int max_data_bytes)
- : callback_count_(0),
- had_error_(0),
- max_data_bytes_(max_data_bytes) {
- }
- virtual void OnData(AudioInputStream* stream, const uint8* data,
- uint32 size, uint32 hardware_delay_bytes, double volume) {
- ++callback_count_;
- // Read the first byte to make sure memory is good.
- if (size) {
- ASSERT_LE(static_cast<int>(size), max_data_bytes_);
- int value = data[0];
- EXPECT_GE(value, 0);
- }
- }
- virtual void OnClose(AudioInputStream* stream) {}
- virtual void OnError(AudioInputStream* stream, int code) {
- ++had_error_;
- }
- // Returns how many times OnData() has been called.
- int callback_count() const {
- return callback_count_;
- }
- // Returns how many times the OnError callback was called.
- int had_error() const {
- return had_error_;
- }
-
- private:
- int callback_count_;
- int had_error_;
- int max_data_bytes_;
-};
-
-static bool CanRunAudioTests(AudioManager* audio_man) {
- bool has_input = audio_man->HasAudioInputDevices();
-
- if (!has_input)
- LOG(WARNING) << "No input devices detected";
-
- return has_input;
-}
-
-static AudioInputStream* CreateTestAudioInputStream(AudioManager* audio_man) {
- AudioInputStream* ais = audio_man->MakeAudioInputStream(
- AudioParameters(AudioParameters::AUDIO_PCM_LINEAR, CHANNEL_LAYOUT_STEREO,
- kSamplingRate, 16, kSamplesPerPacket),
- AudioManagerBase::kDefaultDeviceId);
- EXPECT_TRUE(NULL != ais);
- return ais;
-}
-
-// Test that AudioInputStream rejects out of range parameters.
-TEST(AudioInputTest, SanityOnMakeParams) {
- scoped_ptr<AudioManager> audio_man(AudioManager::Create());
- if (!CanRunAudioTests(audio_man.get()))
- return;
-
- AudioParameters::Format fmt = AudioParameters::AUDIO_PCM_LINEAR;
- EXPECT_TRUE(NULL == audio_man->MakeAudioInputStream(
- AudioParameters(fmt, CHANNEL_LAYOUT_7_1, 8000, 16,
- kSamplesPerPacket), AudioManagerBase::kDefaultDeviceId));
- EXPECT_TRUE(NULL == audio_man->MakeAudioInputStream(
- AudioParameters(fmt, CHANNEL_LAYOUT_MONO, 1024 * 1024, 16,
- kSamplesPerPacket), AudioManagerBase::kDefaultDeviceId));
- EXPECT_TRUE(NULL == audio_man->MakeAudioInputStream(
- AudioParameters(fmt, CHANNEL_LAYOUT_STEREO, 8000, 80,
- kSamplesPerPacket), AudioManagerBase::kDefaultDeviceId));
- EXPECT_TRUE(NULL == audio_man->MakeAudioInputStream(
- AudioParameters(fmt, CHANNEL_LAYOUT_STEREO, 8000, 80,
- 1000 * kSamplesPerPacket),
- AudioManagerBase::kDefaultDeviceId));
- EXPECT_TRUE(NULL == audio_man->MakeAudioInputStream(
- AudioParameters(fmt, CHANNEL_LAYOUT_UNSUPPORTED, 8000, 16,
- kSamplesPerPacket), AudioManagerBase::kDefaultDeviceId));
- EXPECT_TRUE(NULL == audio_man->MakeAudioInputStream(
- AudioParameters(fmt, CHANNEL_LAYOUT_STEREO, -8000, 16,
- kSamplesPerPacket), AudioManagerBase::kDefaultDeviceId));
- EXPECT_TRUE(NULL == audio_man->MakeAudioInputStream(
- AudioParameters(fmt, CHANNEL_LAYOUT_STEREO, 8000, -16,
- kSamplesPerPacket), AudioManagerBase::kDefaultDeviceId));
- EXPECT_TRUE(NULL == audio_man->MakeAudioInputStream(
- AudioParameters(fmt, CHANNEL_LAYOUT_STEREO, 8000, 16, -1024),
- AudioManagerBase::kDefaultDeviceId));
-}
-
-// Test create and close of an AudioInputStream without recording audio.
-TEST(AudioInputTest, CreateAndClose) {
- scoped_ptr<AudioManager> audio_man(AudioManager::Create());
- if (!CanRunAudioTests(audio_man.get()))
- return;
- AudioInputStream* ais = CreateTestAudioInputStream(audio_man.get());
- ais->Close();
-}
-
-// Test create, open and close of an AudioInputStream without recording audio.
-TEST(AudioInputTest, OpenAndClose) {
- scoped_ptr<AudioManager> audio_man(AudioManager::Create());
- if (!CanRunAudioTests(audio_man.get()))
- return;
- AudioInputStream* ais = CreateTestAudioInputStream(audio_man.get());
- EXPECT_TRUE(ais->Open());
- ais->Close();
-}
-
-// Test create, open, stop and close of an AudioInputStream without recording.
-TEST(AudioInputTest, OpenStopAndClose) {
- scoped_ptr<AudioManager> audio_man(AudioManager::Create());
- if (!CanRunAudioTests(audio_man.get()))
- return;
- AudioInputStream* ais = CreateTestAudioInputStream(audio_man.get());
- EXPECT_TRUE(ais->Open());
- ais->Stop();
- ais->Close();
-}
-
-// Test a normal recording sequence using an AudioInputStream.
-TEST(AudioInputTest, Record) {
- scoped_ptr<AudioManager> audio_man(AudioManager::Create());
- if (!CanRunAudioTests(audio_man.get()))
- return;
- MessageLoop message_loop(MessageLoop::TYPE_DEFAULT);
- AudioInputStream* ais = CreateTestAudioInputStream(audio_man.get());
- EXPECT_TRUE(ais->Open());
-
- TestInputCallback test_callback(kSamplesPerPacket * 4);
- ais->Start(&test_callback);
- // Verify at least 500ms worth of audio was recorded, after giving sufficient
- // extra time.
- message_loop.PostDelayedTask(
- FROM_HERE,
- MessageLoop::QuitClosure(),
- base::TimeDelta::FromMilliseconds(690));
- message_loop.Run();
- EXPECT_GE(test_callback.callback_count(), 1);
- EXPECT_FALSE(test_callback.had_error());
-
- ais->Stop();
- ais->Close();
-}
-
-} // namespace media
diff --git a/src/media/audio/audio_input_volume_unittest.cc b/src/media/audio/audio_input_volume_unittest.cc
deleted file mode 100644
index 8f754cc..0000000
--- a/src/media/audio/audio_input_volume_unittest.cc
+++ /dev/null
@@ -1,171 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include <cmath>
-
-#include "base/logging.h"
-#include "base/memory/scoped_ptr.h"
-#include "media/audio/audio_io.h"
-#include "media/audio/audio_manager_base.h"
-#include "media/audio/audio_util.h"
-#include "testing/gtest/include/gtest/gtest.h"
-
-#if defined(OS_WIN)
-#include "base/win/scoped_com_initializer.h"
-#include "media/audio/win/core_audio_util_win.h"
-#endif
-
-namespace media {
-
-class AudioInputVolumeTest : public ::testing::Test {
- protected:
- AudioInputVolumeTest()
- : audio_manager_(AudioManager::Create())
-#if defined(OS_WIN)
- , com_init_(base::win::ScopedCOMInitializer::kMTA)
-#endif
- {
- }
-
- bool CanRunAudioTests() {
-#if defined(OS_WIN)
- // TODO(henrika): add support for volume control on Windows XP as well.
- // For now, we might as well signal false already here to avoid running
- // these tests on Windows XP.
- if (!CoreAudioUtil::IsSupported())
- return false;
-#endif
- if (!audio_manager_.get())
- return false;
-
- return audio_manager_->HasAudioInputDevices();
- }
-
- // Helper method which checks if the stream has volume support.
- bool HasDeviceVolumeControl(AudioInputStream* stream) {
- if (!stream)
- return false;
-
- return (stream->GetMaxVolume() != 0.0);
- }
-
- AudioInputStream* CreateAndOpenStream(const std::string& device_id) {
- AudioParameters::Format format = AudioParameters::AUDIO_PCM_LOW_LATENCY;
- ChannelLayout channel_layout =
- media::GetAudioInputHardwareChannelLayout(device_id);
- int bits_per_sample = 16;
- int sample_rate =
- static_cast<int>(media::GetAudioInputHardwareSampleRate(device_id));
- int samples_per_packet = 0;
-#if defined(OS_MACOSX)
- samples_per_packet = (sample_rate / 100);
-#elif defined(OS_LINUX) || defined(OS_OPENBSD)
- samples_per_packet = (sample_rate / 100);
-#elif defined(OS_WIN)
- if (sample_rate == 44100)
- samples_per_packet = 448;
- else
- samples_per_packet = (sample_rate / 100);
-#else
-#error Unsupported platform
-#endif
- AudioInputStream* ais = audio_manager_->MakeAudioInputStream(
- AudioParameters(format, channel_layout, sample_rate, bits_per_sample,
- samples_per_packet),
- device_id);
- EXPECT_TRUE(NULL != ais);
-
-#if defined(OS_LINUX) || defined(OS_OPENBSD)
- // Some Linux devices do not support our settings, so we may fail to open
- // those devices.
- if (!ais->Open()) {
- // Default device should always be able to be opened.
- EXPECT_TRUE(AudioManagerBase::kDefaultDeviceId != device_id);
- ais->Close();
- ais = NULL;
- }
-#elif defined(OS_WIN) || defined(OS_MACOSX)
- EXPECT_TRUE(ais->Open());
-#endif
-
- return ais;
- }
-
- scoped_ptr<AudioManager> audio_manager_;
-
-#if defined(OS_WIN)
- base::win::ScopedCOMInitializer com_init_;
-#endif
-};
-
-TEST_F(AudioInputVolumeTest, InputVolumeTest) {
- if (!CanRunAudioTests())
- return;
-
- // Retrieve a list of all available input devices.
- AudioDeviceNames device_names;
- audio_manager_->GetAudioInputDeviceNames(&device_names);
- if (device_names.empty()) {
- LOG(WARNING) << "Could not find any available input device";
- return;
- }
-
- // Scan all available input devices and repeat the same test for all of them.
- for (AudioDeviceNames::const_iterator it = device_names.begin();
- it != device_names.end();
- ++it) {
- AudioInputStream* ais = CreateAndOpenStream(it->unique_id);
- if (!ais) {
- DLOG(WARNING) << "Failed to open stream for device " << it->unique_id;
- continue;
- }
-
- if (!HasDeviceVolumeControl(ais)) {
- DLOG(WARNING) << "Device: " << it->unique_id
- << ", does not have volume control.";
- ais->Close();
- continue;
- }
-
- double max_volume = ais->GetMaxVolume();
- EXPECT_GT(max_volume, 0.0);
-
- // Store the current input-device volume level.
- double original_volume = ais->GetVolume();
- EXPECT_GE(original_volume, 0.0);
-#if defined(OS_WIN) || defined(OS_MACOSX)
- // Note that |original_volume| can be higher than |max_volume| on Linux.
- EXPECT_LE(original_volume, max_volume);
-#endif
-
- // Set the volume to the maximum level.
- ais->SetVolume(max_volume);
- double current_volume = ais->GetVolume();
- EXPECT_EQ(max_volume, current_volume);
-
- // Set the volume to the minimum level (=0).
- double new_volume = 0.0;
- ais->SetVolume(new_volume);
- current_volume = ais->GetVolume();
- EXPECT_EQ(new_volume, current_volume);
-
- // Set the volume to the mid level (50% of max).
- // Verify that the absolute error is small enough.
- new_volume = max_volume / 2;
- ais->SetVolume(new_volume);
- current_volume = ais->GetVolume();
- EXPECT_LT(current_volume, max_volume);
- EXPECT_GT(current_volume, 0);
- EXPECT_NEAR(current_volume, new_volume, 0.25 * max_volume);
-
- // Restores the volume to the original value.
- ais->SetVolume(original_volume);
- current_volume = ais->GetVolume();
- EXPECT_EQ(original_volume, current_volume);
-
- ais->Close();
- }
-}
-
-} // namespace media
diff --git a/src/media/audio/audio_io.h b/src/media/audio/audio_io.h
deleted file mode 100644
index e7b9a36..0000000
--- a/src/media/audio/audio_io.h
+++ /dev/null
@@ -1,180 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef MEDIA_AUDIO_AUDIO_IO_H_
-#define MEDIA_AUDIO_AUDIO_IO_H_
-
-#include "base/basictypes.h"
-#include "media/audio/audio_buffers_state.h"
-#include "media/base/audio_bus.h"
-
-// Low-level audio output support. To make sound there are 3 objects involved:
-// - AudioSource : produces audio samples on a pull model. Implements
-// the AudioSourceCallback interface.
-// - AudioOutputStream : uses the AudioSource to render audio on a given
-// channel, format and sample frequency configuration. Data from the
-// AudioSource is delivered in a 'pull' model.
-// - AudioManager : factory for the AudioOutputStream objects, manager
-// of the hardware resources and mixer control.
-//
-// The number and configuration of AudioOutputStream does not need to match the
-// physically available hardware resources. For example you can have:
-//
-// MonoPCMSource1 --> MonoPCMStream1 --> | | --> audio left channel
-// StereoPCMSource -> StereoPCMStream -> | mixer |
-// MonoPCMSource2 --> MonoPCMStream2 --> | | --> audio right channel
-//
-// This facility's objective is to mix and render audio with low overhead using
-// the OS's basic audio support, abstracting as much as possible the
-// idiosyncrasies of each platform. Non-goals:
-// - Positional, 3d audio
-// - Dependence on non-default libraries such as DirectX 9, 10, XAudio
-// - Digital signal processing or effects
-// - Extra features if a specific hardware is installed (EAX, X-fi)
-//
-// The primary client of this facility is audio coming from several tabs.
-// Specifically for this case we avoid supporting complex formats such as MP3
-// or WMA. Complex format decoding should be done by the renderers.
-
-
-// Models an audio stream that gets rendered to the audio hardware output.
-// Because we support more audio streams than physically available channels
-// a given AudioOutputStream might or might not talk directly to hardware.
-// An audio stream allocates several buffers for audio data and calls
-// AudioSourceCallback::OnMoreData() periodically to fill these buffers,
-// as the data is written to the audio device. Size of each packet is determined
-// by |samples_per_packet| specified in AudioParameters when the stream is
-// created.
-
-namespace media {
-
-class MEDIA_EXPORT AudioOutputStream {
- public:
- // Audio sources must implement AudioSourceCallback. This interface will be
- // called on an arbitrary thread, which is very likely a high-priority thread.
- // Do not rely on this thread's TLS or make calls that alter the thread
- // itself, such as creating windows or initializing COM.
- class MEDIA_EXPORT AudioSourceCallback {
- public:
- // Provide more data by fully filling |dest|. The source will return
- // the number of frames it filled. |buffers_state| contains current state
- // of the buffers, and can be used by the source to calculate delay.
- virtual int OnMoreData(AudioBus* dest,
- AudioBuffersState buffers_state) = 0;
-
- virtual int OnMoreIOData(AudioBus* source,
- AudioBus* dest,
- AudioBuffersState buffers_state) = 0;
-
- // There was an error while playing a buffer. Audio source cannot be
- // destroyed yet. No direct action needed by the AudioStream, but it is
- // a good place to stop accumulating sound data since it is likely that
- // playback will not continue. |code| is an error code that is platform
- // specific.
- virtual void OnError(AudioOutputStream* stream, int code) = 0;
-
- // Deprecated. DO NOT USE. Waits until data becomes available. Used only
- // by Windows' WaveOut clients which may be extremely laggy. Will yield the
- // current thread until the renderer client has written its audio data or
- // 1.5 seconds have elapsed.
- virtual void WaitTillDataReady() {}
-
- protected:
- virtual ~AudioSourceCallback() {}
- };
-
- virtual ~AudioOutputStream() {}
-
- // Open the stream. false is returned if the stream cannot be opened. Open()
- // must always be followed by a call to Close() even if Open() fails.
- virtual bool Open() = 0;
-
- // Starts playing audio and generating AudioSourceCallback::OnMoreData().
-  // Since the implementor of AudioOutputStream may have internal buffers, the
-  // initial buffers are fetched right after this method is called.
- //
- // The output stream does not take ownership of this callback.
- virtual void Start(AudioSourceCallback* callback) = 0;
-
-  // Stops playing audio. The effect might not be instantaneous, as the
-  // hardware might have locked audio data that it is still processing.
- virtual void Stop() = 0;
-
- // Sets the relative volume, with range [0.0, 1.0] inclusive.
- virtual void SetVolume(double volume) = 0;
-
- // Gets the relative volume, with range [0.0, 1.0] inclusive.
- virtual void GetVolume(double* volume) = 0;
-
- // Close the stream. This also generates AudioSourceCallback::OnClose().
- // After calling this method, the object should not be used anymore.
- virtual void Close() = 0;
-};
-
-// Models an audio sink receiving recorded audio from the audio driver.
-class MEDIA_EXPORT AudioInputStream {
- public:
- class MEDIA_EXPORT AudioInputCallback {
- public:
- // Called by the audio recorder when a full packet of audio data is
- // available. This is called from a special audio thread and the
- // implementation should return as soon as possible.
- virtual void OnData(AudioInputStream* stream, const uint8* src,
- uint32 size, uint32 hardware_delay_bytes,
- double volume) = 0;
-
- // The stream is done with this callback, the last call received by this
- // audio sink.
- virtual void OnClose(AudioInputStream* stream) = 0;
-
-    // There was an error while recording audio. The audio sink cannot be
-    // destroyed yet. No direct action is needed by the AudioInputStream, but
-    // this is a good place to stop accumulating sound data since it is likely
-    // that recording will not continue. |code| is a platform-specific error
-    // code.
- virtual void OnError(AudioInputStream* stream, int code) = 0;
-
- protected:
- virtual ~AudioInputCallback() {}
- };
-
- virtual ~AudioInputStream() {}
-
-  // Opens the stream and prepares it for recording. Call Start() to actually
- // begin recording.
- virtual bool Open() = 0;
-
- // Starts recording audio and generating AudioInputCallback::OnData().
- // The input stream does not take ownership of this callback.
- virtual void Start(AudioInputCallback* callback) = 0;
-
- // Stops recording audio. Effect might not be instantaneous as there could be
- // pending audio callbacks in the queue which will be issued first before
- // recording stops.
- virtual void Stop() = 0;
-
- // Close the stream. This also generates AudioInputCallback::OnClose(). This
- // should be the last call made on this object.
- virtual void Close() = 0;
-
- // Returns the maximum microphone analog volume or 0.0 if device does not
- // have volume control.
- virtual double GetMaxVolume() = 0;
-
- // Sets the microphone analog volume, with range [0, max_volume] inclusive.
- virtual void SetVolume(double volume) = 0;
-
- // Returns the microphone analog volume, with range [0, max_volume] inclusive.
- virtual double GetVolume() = 0;
-
- // Sets the Automatic Gain Control (AGC) state.
- virtual void SetAutomaticGainControl(bool enabled) = 0;
-
- // Returns the Automatic Gain Control (AGC) state.
- virtual bool GetAutomaticGainControl() = 0;
-};
-
-} // namespace media
-
-#endif // MEDIA_AUDIO_AUDIO_IO_H_
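The deleted audio_io.h header above describes a pull-model API: a client implements AudioSourceCallback, obtains an AudioOutputStream from the AudioManager factory, and drives it through Open()/Start()/Stop()/Close(). The following is only a rough sketch of such a client under the interfaces shown above; SilenceSource and PlaySilence are hypothetical names, not part of the tree.

    // Minimal sketch of a client of the (deleted) audio_io.h interfaces.
    // SilenceSource is a hypothetical callback that renders silence.
    class SilenceSource : public media::AudioOutputStream::AudioSourceCallback {
     public:
      virtual int OnMoreData(media::AudioBus* dest,
                             media::AudioBuffersState buffers_state) OVERRIDE {
        dest->Zero();           // Fill every channel with silence.
        return dest->frames();  // Report how many frames were produced.
      }
      virtual int OnMoreIOData(media::AudioBus* source, media::AudioBus* dest,
                               media::AudioBuffersState buffers_state) OVERRIDE {
        return OnMoreData(dest, buffers_state);
      }
      virtual void OnError(media::AudioOutputStream* stream, int code) OVERRIDE {}
    };

    void PlaySilence(media::AudioManager* audio_manager,
                     const media::AudioParameters& params) {
      media::AudioOutputStream* stream =
          audio_manager->MakeAudioOutputStream(params);
      if (!stream)
        return;
      SilenceSource source;
      if (stream->Open()) {      // Open() may fail; Close() is still required.
        stream->SetVolume(1.0);
        stream->Start(&source);  // The stream does not take ownership.
        // ... play for a while ...
        stream->Stop();
      }
      stream->Close();           // Owned by the AudioManager; do not delete.
    }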
diff --git a/src/media/audio/audio_low_latency_input_output_unittest.cc b/src/media/audio/audio_low_latency_input_output_unittest.cc
deleted file mode 100644
index 463321a..0000000
--- a/src/media/audio/audio_low_latency_input_output_unittest.cc
+++ /dev/null
@@ -1,464 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/basictypes.h"
-#include "base/environment.h"
-#include "base/file_util.h"
-#include "base/memory/scoped_ptr.h"
-#include "base/message_loop.h"
-#include "base/path_service.h"
-#include "base/synchronization/lock.h"
-#include "base/test/test_timeouts.h"
-#include "base/time.h"
-#include "build/build_config.h"
-#include "media/audio/audio_io.h"
-#include "media/audio/audio_manager_base.h"
-#include "media/audio/audio_util.h"
-#include "media/base/seekable_buffer.h"
-#include "testing/gmock/include/gmock/gmock.h"
-#include "testing/gtest/include/gtest/gtest.h"
-
-#if defined(OS_LINUX) || defined(OS_OPENBSD)
-#include "media/audio/linux/audio_manager_linux.h"
-#elif defined(OS_MACOSX)
-#include "media/audio/mac/audio_manager_mac.h"
-#elif defined(OS_WIN)
-#include "base/win/scoped_com_initializer.h"
-#include "media/audio/win/audio_manager_win.h"
-#include "media/audio/win/core_audio_util_win.h"
-#elif defined(OS_ANDROID)
-#include "media/audio/android/audio_manager_android.h"
-#endif
-
-namespace media {
-
-#if defined(OS_LINUX) || defined(OS_OPENBSD)
-typedef AudioManagerLinux AudioManagerAnyPlatform;
-#elif defined(OS_MACOSX)
-typedef AudioManagerMac AudioManagerAnyPlatform;
-#elif defined(OS_WIN)
-typedef AudioManagerWin AudioManagerAnyPlatform;
-#elif defined(OS_ANDROID)
-typedef AudioManagerAndroid AudioManagerAnyPlatform;
-#endif
-
-// Limits the number of delay measurements we can store in an array and
-// then write to file at end of the WASAPIAudioInputOutputFullDuplex test.
-static const size_t kMaxDelayMeasurements = 1000;
-
-// Name of the output text file. The output file will be stored in the
-// directory containing media_unittests.exe.
-// Example: \src\build\Debug\audio_delay_values_ms.txt.
-// See comments for the WASAPIAudioInputOutputFullDuplex test for more details
-// about the file format.
-static const char* kDelayValuesFileName = "audio_delay_values_ms.txt";
-
-// Contains delay values which are reported during the full-duplex test.
-// Total delay = |buffer_delay_ms| + |input_delay_ms| + |output_delay_ms|.
-struct AudioDelayState {
- AudioDelayState()
- : delta_time_ms(0),
- buffer_delay_ms(0),
- input_delay_ms(0),
- output_delay_ms(0) {
- }
-
- // Time in milliseconds since last delay report. Typical value is ~10 [ms].
- int delta_time_ms;
-
- // Size of internal sync buffer. Typical value is ~0 [ms].
- int buffer_delay_ms;
-
- // Reported capture/input delay. Typical value is ~10 [ms].
- int input_delay_ms;
-
- // Reported render/output delay. Typical value is ~40 [ms].
- int output_delay_ms;
-};
-
-// This class mocks the platform specific audio manager and overrides
-// the GetMessageLoop() method to ensure that we can run our tests on
-// the main thread instead of the audio thread.
-class MockAudioManager : public AudioManagerAnyPlatform {
- public:
- MockAudioManager() {}
- virtual ~MockAudioManager() {}
-
- virtual scoped_refptr<base::MessageLoopProxy> GetMessageLoop() OVERRIDE {
- return MessageLoop::current()->message_loop_proxy();
- }
-
- private:
- DISALLOW_COPY_AND_ASSIGN(MockAudioManager);
-};
-
-// Test fixture class.
-class AudioLowLatencyInputOutputTest : public testing::Test {
- protected:
- AudioLowLatencyInputOutputTest() {}
-
- virtual ~AudioLowLatencyInputOutputTest() {}
-
- AudioManager* audio_manager() { return &mock_audio_manager_; }
- MessageLoopForUI* message_loop() { return &message_loop_; }
-
- // Convenience method which ensures that we are not running on the build
- // bots and that at least one valid input and output device can be found.
- bool CanRunAudioTests() {
- bool input = audio_manager()->HasAudioInputDevices();
- bool output = audio_manager()->HasAudioOutputDevices();
- LOG_IF(WARNING, !input) << "No input device detected.";
- LOG_IF(WARNING, !output) << "No output device detected.";
- return input && output;
- }
-
- private:
- MessageLoopForUI message_loop_;
- MockAudioManager mock_audio_manager_;
-
- DISALLOW_COPY_AND_ASSIGN(AudioLowLatencyInputOutputTest);
-};
-
-// This audio source/sink implementation should be used for manual tests
-// only, since delay measurements are stored in an output text file.
-// All incoming/recorded audio packets are stored in an intermediate media
-// buffer which the renderer reads from when it needs audio for playout.
-// The total effect is that recorded audio is played out in loopback, using
-// a sync buffer as temporary storage.
-class FullDuplexAudioSinkSource
- : public AudioInputStream::AudioInputCallback,
- public AudioOutputStream::AudioSourceCallback {
- public:
- FullDuplexAudioSinkSource(int sample_rate,
- int samples_per_packet,
- int channels)
- : sample_rate_(sample_rate),
- samples_per_packet_(samples_per_packet),
- channels_(channels),
- input_elements_to_write_(0),
- output_elements_to_write_(0),
- previous_write_time_(base::Time::Now()) {
- // Size in bytes of each audio frame (4 bytes for 16-bit stereo PCM).
- frame_size_ = (16 / 8) * channels_;
-
- // Start with the smallest possible buffer size. It will be increased
- // dynamically during the test if required.
- buffer_.reset(
- new media::SeekableBuffer(0, samples_per_packet_ * frame_size_));
-
- frames_to_ms_ = static_cast<double>(1000.0 / sample_rate_);
- delay_states_.reset(new AudioDelayState[kMaxDelayMeasurements]);
- }
-
- virtual ~FullDuplexAudioSinkSource() {
- // Get complete file path to output file in the directory containing
- // media_unittests.exe. Example: src/build/Debug/audio_delay_values_ms.txt.
- FilePath file_name;
- EXPECT_TRUE(PathService::Get(base::DIR_EXE, &file_name));
- file_name = file_name.AppendASCII(kDelayValuesFileName);
-
- FILE* text_file = file_util::OpenFile(file_name, "wt");
- DLOG_IF(ERROR, !text_file) << "Failed to open log file.";
- LOG(INFO) << ">> Output file " << file_name.value() << " has been created.";
-
- // Write the array which contains time-stamps, buffer size and
- // audio delays values to a text file.
- size_t elements_written = 0;
- while (elements_written <
- std::min(input_elements_to_write_, output_elements_to_write_)) {
- const AudioDelayState state = delay_states_[elements_written];
- fprintf(text_file, "%d %d %d %d\n",
- state.delta_time_ms,
- state.buffer_delay_ms,
- state.input_delay_ms,
- state.output_delay_ms);
- ++elements_written;
- }
-
- file_util::CloseFile(text_file);
- }
-
- // AudioInputStream::AudioInputCallback.
- virtual void OnData(AudioInputStream* stream,
- const uint8* src, uint32 size,
- uint32 hardware_delay_bytes,
- double volume) OVERRIDE {
- base::AutoLock lock(lock_);
-
- // Update three components in the AudioDelayState for this recorded
- // audio packet.
- base::Time now_time = base::Time::Now();
- int diff = (now_time - previous_write_time_).InMilliseconds();
- previous_write_time_ = now_time;
- if (input_elements_to_write_ < kMaxDelayMeasurements) {
- delay_states_[input_elements_to_write_].delta_time_ms = diff;
- delay_states_[input_elements_to_write_].buffer_delay_ms =
- BytesToMilliseconds(buffer_->forward_bytes());
- delay_states_[input_elements_to_write_].input_delay_ms =
- BytesToMilliseconds(hardware_delay_bytes);
- ++input_elements_to_write_;
- }
-
- // Store the captured audio packet in a seekable media buffer.
- if (!buffer_->Append(src, size)) {
- // An attempt to write outside the buffer limits has been made.
- // Double the buffer capacity to ensure that we have a buffer large
- // enough to handle the current sample test scenario.
- buffer_->set_forward_capacity(2 * buffer_->forward_capacity());
- buffer_->Clear();
- }
- }
-
- virtual void OnClose(AudioInputStream* stream) OVERRIDE {}
- virtual void OnError(AudioInputStream* stream, int code) OVERRIDE {}
-
- // AudioOutputStream::AudioSourceCallback.
- virtual int OnMoreData(AudioBus* audio_bus,
- AudioBuffersState buffers_state) OVERRIDE {
- base::AutoLock lock(lock_);
-
- // Update one component in the AudioDelayState for the packet
- // which is about to be played out.
- if (output_elements_to_write_ < kMaxDelayMeasurements) {
- int output_delay_bytes = buffers_state.hardware_delay_bytes;
-#if defined(OS_WIN)
- // Special fix for Windows in combination with Wave where the
- // pending bytes field of the audio buffer state is used to
- // report the delay.
- if (!CoreAudioUtil::IsSupported()) {
- output_delay_bytes = buffers_state.pending_bytes;
- }
-#endif
- delay_states_[output_elements_to_write_].output_delay_ms =
- BytesToMilliseconds(output_delay_bytes);
- ++output_elements_to_write_;
- }
-
- int size;
- const uint8* source;
- // Read the data from the seekable media buffer which contains
- // captured data at the same size and sample rate as the output side.
- if (buffer_->GetCurrentChunk(&source, &size) && size > 0) {
- EXPECT_EQ(channels_, audio_bus->channels());
- size = std::min(audio_bus->frames() * frame_size_, size);
- EXPECT_EQ(static_cast<size_t>(size) % sizeof(*audio_bus->channel(0)), 0U);
- audio_bus->FromInterleaved(
- source, size / frame_size_, frame_size_ / channels_);
- buffer_->Seek(size);
- return size / frame_size_;
- }
-
- return 0;
- }
-
- virtual int OnMoreIOData(AudioBus* source,
- AudioBus* dest,
- AudioBuffersState buffers_state) OVERRIDE {
- NOTREACHED();
- return 0;
- }
-
- virtual void OnError(AudioOutputStream* stream, int code) OVERRIDE {}
- virtual void WaitTillDataReady() OVERRIDE {}
-
- protected:
- // Converts from bytes to milliseconds taking the sample rate and size
- // of an audio frame into account.
- int BytesToMilliseconds(uint32 delay_bytes) const {
- return static_cast<int>((delay_bytes / frame_size_) * frames_to_ms_ + 0.5);
- }
-
- private:
- base::Lock lock_;
- scoped_ptr<media::SeekableBuffer> buffer_;
- int sample_rate_;
- int samples_per_packet_;
- int channels_;
- int frame_size_;
- double frames_to_ms_;
- scoped_array<AudioDelayState> delay_states_;
- size_t input_elements_to_write_;
- size_t output_elements_to_write_;
- base::Time previous_write_time_;
-};
-
-class AudioInputStreamTraits {
- public:
- typedef AudioInputStream StreamType;
-
- static int HardwareSampleRate() {
- return static_cast<int>(media::GetAudioInputHardwareSampleRate(
- AudioManagerBase::kDefaultDeviceId));
- }
-
- // TODO(henrika): add support for GetAudioInputHardwareBufferSize in media.
- static int HardwareBufferSize() {
- return static_cast<int>(media::GetAudioHardwareBufferSize());
- }
-
- static StreamType* CreateStream(AudioManager* audio_manager,
- const AudioParameters& params) {
- return audio_manager->MakeAudioInputStream(params,
- AudioManagerBase::kDefaultDeviceId);
- }
-};
-
-class AudioOutputStreamTraits {
- public:
- typedef AudioOutputStream StreamType;
-
- static int HardwareSampleRate() {
- return static_cast<int>(media::GetAudioHardwareSampleRate());
- }
-
- static int HardwareBufferSize() {
- return static_cast<int>(media::GetAudioHardwareBufferSize());
- }
-
- static StreamType* CreateStream(AudioManager* audio_manager,
- const AudioParameters& params) {
- return audio_manager->MakeAudioOutputStream(params);
- }
-};
-
-// Traits template holding a trait of StreamType. It encapsulates
-// AudioInputStream and AudioOutputStream stream types.
-template <typename StreamTraits>
-class StreamWrapper {
- public:
- typedef typename StreamTraits::StreamType StreamType;
-
- explicit StreamWrapper(AudioManager* audio_manager)
- :
-#if defined(OS_WIN)
- com_init_(base::win::ScopedCOMInitializer::kMTA),
-#endif
- audio_manager_(audio_manager),
- format_(AudioParameters::AUDIO_PCM_LOW_LATENCY),
-#if defined(OS_ANDROID)
- channel_layout_(CHANNEL_LAYOUT_MONO),
-#else
- channel_layout_(CHANNEL_LAYOUT_STEREO),
-#endif
- bits_per_sample_(16) {
- // Use the preferred sample rate.
- sample_rate_ = StreamTraits::HardwareSampleRate();
-
- // Use the preferred buffer size. Note that the input side uses the same
- // size as the output side in this implementation.
- samples_per_packet_ = StreamTraits::HardwareBufferSize();
- }
-
- virtual ~StreamWrapper() {}
-
- // Creates an Audio[Input|Output]Stream stream object using default
- // parameters.
- StreamType* Create() {
- return CreateStream();
- }
-
- int channels() const {
- return ChannelLayoutToChannelCount(channel_layout_);
- }
- int bits_per_sample() const { return bits_per_sample_; }
- int sample_rate() const { return sample_rate_; }
- int samples_per_packet() const { return samples_per_packet_; }
-
- private:
- StreamType* CreateStream() {
- StreamType* stream = StreamTraits::CreateStream(audio_manager_,
- AudioParameters(format_, channel_layout_, sample_rate_,
- bits_per_sample_, samples_per_packet_));
- EXPECT_TRUE(stream);
- return stream;
- }
-
-#if defined(OS_WIN)
- base::win::ScopedCOMInitializer com_init_;
-#endif
-
- AudioManager* audio_manager_;
- AudioParameters::Format format_;
- ChannelLayout channel_layout_;
- int bits_per_sample_;
- int sample_rate_;
- int samples_per_packet_;
-};
-
-typedef StreamWrapper<AudioInputStreamTraits> AudioInputStreamWrapper;
-typedef StreamWrapper<AudioOutputStreamTraits> AudioOutputStreamWrapper;
-
-// This test is intended for manual use and should only be enabled when a
-// real-time test of audio in full duplex is required that also creates a text
-// file containing the measured delay values.
-// The file can later be analyzed off line using e.g. MATLAB.
-// MATLAB example:
-// D=load('audio_delay_values_ms.txt');
-// x=cumsum(D(:,1));
-// plot(x, D(:,2), x, D(:,3), x, D(:,4), x, D(:,2)+D(:,3)+D(:,4));
-// axis([0, max(x), 0, max(D(:,2)+D(:,3)+D(:,4))+10]);
-// legend('buffer delay','input delay','output delay','total delay');
-// xlabel('time [msec]')
-// ylabel('delay [msec]')
-// title('Full-duplex audio delay measurement');
-TEST_F(AudioLowLatencyInputOutputTest, DISABLED_FullDuplexDelayMeasurement) {
- if (!CanRunAudioTests())
- return;
-
- AudioInputStreamWrapper aisw(audio_manager());
- AudioInputStream* ais = aisw.Create();
- EXPECT_TRUE(ais);
-
- AudioOutputStreamWrapper aosw(audio_manager());
- AudioOutputStream* aos = aosw.Create();
- EXPECT_TRUE(aos);
-
- // This test only supports identical parameters in both directions.
- // TODO(henrika): it is possible to cut delay here by using different
- // buffer sizes for input and output.
- if (aisw.sample_rate() != aosw.sample_rate() ||
- aisw.samples_per_packet() != aosw.samples_per_packet() ||
- aisw.channels()!= aosw.channels() ||
- aisw.bits_per_sample() != aosw.bits_per_sample()) {
- LOG(ERROR) << "This test requires symmetric input and output parameters. "
- "Ensure that sample rate and number of channels are identical in "
- "both directions";
- aos->Close();
- ais->Close();
- return;
- }
-
- EXPECT_TRUE(ais->Open());
- EXPECT_TRUE(aos->Open());
-
- FullDuplexAudioSinkSource full_duplex(
- aisw.sample_rate(), aisw.samples_per_packet(), aisw.channels());
-
- LOG(INFO) << ">> You should now be able to hear yourself in loopback...";
- DLOG(INFO) << " sample_rate : " << aisw.sample_rate();
- DLOG(INFO) << " samples_per_packet: " << aisw.samples_per_packet();
- DLOG(INFO) << " channels : " << aisw.channels();
-
- ais->Start(&full_duplex);
- aos->Start(&full_duplex);
-
- // Wait for approximately 10 seconds. The user shall hear his own voice
- // in loop back during this time. At the same time, delay recordings are
- // performed and stored in the output text file.
- message_loop()->PostDelayedTask(FROM_HERE,
- MessageLoop::QuitClosure(), TestTimeouts::action_timeout());
- message_loop()->Run();
-
- aos->Stop();
- ais->Stop();
-
-  // All Close() operations that run on the mocked audio thread
-  // should be synchronous and not post additional close tasks to
-  // the mocked audio thread. Hence, there is no need to call
- // message_loop()->RunUntilIdle() after the Close() methods.
- aos->Close();
- ais->Close();
-}
-
-} // namespace media
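The delay bookkeeping above is plain arithmetic: bytes are divided by the frame size to get frames, and frames are scaled by 1000 / sample_rate to get milliseconds. A small worked example under assumed, illustrative parameters (48 kHz, 16-bit stereo):

    // Illustrative only: with 16-bit stereo PCM, frame_size = (16 / 8) * 2 = 4
    // bytes, and frames_to_ms = 1000.0 / 48000. A reported hardware delay of
    // 7680 bytes is therefore 7680 / 4 = 1920 frames, i.e. 1920 * 1000 / 48000
    // = 40 ms, matching the "typical ~40 ms" output delay noted above.
    int BytesToMillisecondsExample(uint32 delay_bytes) {
      const int frame_size = (16 / 8) * 2;         // Assumed: 16-bit stereo.
      const double frames_to_ms = 1000.0 / 48000;  // Assumed: 48 kHz.
      return static_cast<int>((delay_bytes / frame_size) * frames_to_ms + 0.5);
    }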
diff --git a/src/media/audio/audio_manager.cc b/src/media/audio/audio_manager.cc
deleted file mode 100644
index 9372d08..0000000
--- a/src/media/audio/audio_manager.cc
+++ /dev/null
@@ -1,29 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "media/audio/audio_manager.h"
-
-#include "base/at_exit.h"
-#include "base/bind.h"
-#include "base/bind_helpers.h"
-#include "base/logging.h"
-#include "base/message_loop.h"
-
-namespace media {
-
-// Forward declaration of the platform specific AudioManager factory function.
-AudioManager* CreateAudioManager();
-
-AudioManager::AudioManager() {
-}
-
-AudioManager::~AudioManager() {
-}
-
-// static
-AudioManager* AudioManager::Create() {
- return CreateAudioManager();
-}
-
-} // namespace media
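AudioManager::Create() above delegates to a free function, CreateAudioManager(), that each platform port links in exactly once. A hypothetical port would provide something like the following; the header path and AudioManagerFoo class are illustrative names, not from the tree.

    // Hypothetical platform shim; audio_manager_foo.h and AudioManagerFoo are
    // illustrative names only.
    #include "media/audio/foo/audio_manager_foo.h"

    namespace media {

    AudioManager* CreateAudioManager() {
      // AudioManager::Create() calls this; exactly one definition is linked in.
      return new AudioManagerFoo();
    }

    }  // namespace media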
diff --git a/src/media/audio/audio_manager.h b/src/media/audio/audio_manager.h
deleted file mode 100644
index ca4c468..0000000
--- a/src/media/audio/audio_manager.h
+++ /dev/null
@@ -1,136 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef MEDIA_AUDIO_AUDIO_MANAGER_H_
-#define MEDIA_AUDIO_AUDIO_MANAGER_H_
-
-#include <string>
-
-#include "base/basictypes.h"
-#include "base/memory/ref_counted.h"
-#include "base/string16.h"
-#include "media/audio/audio_device_name.h"
-#include "media/audio/audio_parameters.h"
-
-class MessageLoop;
-
-namespace base {
-class MessageLoopProxy;
-}
-
-namespace media {
-
-class AudioInputStream;
-class AudioOutputStream;
-
-// Manages all audio resources. In particular it owns the AudioOutputStream
-// objects. Provides some convenience functions that avoid the need to provide
-// iterators over the existing streams.
-class MEDIA_EXPORT AudioManager {
- public:
- virtual ~AudioManager();
-
- // Use to construct the audio manager.
- // NOTE: There should only be one instance.
- static AudioManager* Create();
-
- // Returns true if the OS reports existence of audio devices. This does not
- // guarantee that the existing devices support all formats and sample rates.
- virtual bool HasAudioOutputDevices() = 0;
-
- // Returns true if the OS reports existence of audio recording devices. This
- // does not guarantee that the existing devices support all formats and
- // sample rates.
- virtual bool HasAudioInputDevices() = 0;
-
- // Returns a human readable string for the model/make of the active audio
- // input device for this computer.
- virtual string16 GetAudioInputDeviceModel() = 0;
-
- // Returns true if the platform specific audio input settings UI is known
- // and can be shown.
- virtual bool CanShowAudioInputSettings() = 0;
-
- // Opens the platform default audio input settings UI.
- // Note: This could invoke an external application/preferences pane, so
-  // it ideally must not be called from the UI thread or other time-sensitive
- // threads to avoid blocking the rest of the application.
- virtual void ShowAudioInputSettings() = 0;
-
- // Appends a list of available input devices. It is not guaranteed that
- // all the devices in the list support all formats and sample rates for
- // recording.
- virtual void GetAudioInputDeviceNames(AudioDeviceNames* device_names) = 0;
-
- // Factory for all the supported stream formats. |params| defines parameters
- // of the audio stream to be created.
- //
-  // |params.samples_per_packet| is the requested buffer allocation which the
-  // audio source thinks it can usually fill without blocking. Internally two
-  // or three buffers are created; one will be locked for playback and one will
- // be ready to be filled in the call to AudioSourceCallback::OnMoreData().
- //
- // Returns NULL if the combination of the parameters is not supported, or if
- // we have reached some other platform specific limit.
- //
-  // |params.format| can be set to AUDIO_PCM_LOW_LATENCY, which has two
-  // effects:
-  // 1- Instead of being triple buffered, the audio will be double buffered.
- // 2- A low latency driver or alternative audio subsystem will be used when
- // available.
- //
- // Do not free the returned AudioOutputStream. It is owned by AudioManager.
- virtual AudioOutputStream* MakeAudioOutputStream(
- const AudioParameters& params) = 0;
-
-  // Creates a new audio output proxy. A proxy implements the
-  // AudioOutputStream interface, but unlike a regular output stream
-  // created with MakeAudioOutputStream(), it opens the device only when a
-  // sound is actually playing.
- virtual AudioOutputStream* MakeAudioOutputStreamProxy(
- const AudioParameters& params) = 0;
-
- // Factory to create audio recording streams.
- // |channels| can be 1 or 2.
- // |sample_rate| is in hertz and can be any value supported by the platform.
- // |bits_per_sample| can be any value supported by the platform.
-  // |samples_per_packet| is a sample count (not hertz) and can range from 0 to
-  // |sample_rate|, with 0 suggesting that the implementation use a default
-  // value for that platform.
- // Returns NULL if the combination of the parameters is not supported, or if
- // we have reached some other platform specific limit.
- //
- // Do not free the returned AudioInputStream. It is owned by AudioManager.
- // When you are done with it, call |Stop()| and |Close()| to release it.
- virtual AudioInputStream* MakeAudioInputStream(
- const AudioParameters& params, const std::string& device_id) = 0;
-
- // Used to determine if something else is currently making use of audio input.
- virtual bool IsRecordingInProcess() = 0;
-
- // Returns message loop used for audio IO.
- virtual scoped_refptr<base::MessageLoopProxy> GetMessageLoop() = 0;
-
- // Allows clients to listen for device state changes; e.g. preferred sample
- // rate or channel layout changes. The typical response to receiving this
- // callback is to recreate the stream.
- class AudioDeviceListener {
- public:
- virtual void OnDeviceChange() = 0;
- };
-
- virtual void AddOutputDeviceChangeListener(AudioDeviceListener* listener) = 0;
- virtual void RemoveOutputDeviceChangeListener(
- AudioDeviceListener* listener) = 0;
-
- protected:
- AudioManager();
-
- private:
- DISALLOW_COPY_AND_ASSIGN(AudioManager);
-};
-
-} // namespace media
-
-#endif // MEDIA_AUDIO_AUDIO_MANAGER_H_
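The AudioDeviceListener hook declared above is how streams survive device state changes: a client registers on the audio message loop and, in OnDeviceChange(), tears down and recreates its stream. A rough sketch follows, with illustrative names (StreamOwner, RecreateStream) that are not from the tree.

    // Hypothetical listener; must be added/removed on AudioManager's loop.
    class StreamOwner : public media::AudioManager::AudioDeviceListener {
     public:
      explicit StreamOwner(media::AudioManager* manager) : manager_(manager) {
        manager_->AddOutputDeviceChangeListener(this);
      }
      virtual ~StreamOwner() {
        manager_->RemoveOutputDeviceChangeListener(this);
      }
      virtual void OnDeviceChange() OVERRIDE {
        // Typical response per the header comment: close the old stream and
        // recreate it so it picks up the new preferred sample rate / layout.
        RecreateStream();
      }
     private:
      void RecreateStream() { /* Close() old stream, MakeAudioOutputStream(). */ }
      media::AudioManager* manager_;
    };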
diff --git a/src/media/audio/audio_manager_base.cc b/src/media/audio/audio_manager_base.cc
deleted file mode 100644
index 6333039..0000000
--- a/src/media/audio/audio_manager_base.cc
+++ /dev/null
@@ -1,397 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "media/audio/audio_manager_base.h"
-
-#include "base/bind.h"
-#include "base/command_line.h"
-#include "base/message_loop_proxy.h"
-#include "base/threading/thread.h"
-#include "media/audio/audio_output_dispatcher_impl.h"
-#include "media/audio/audio_output_proxy.h"
-#include "media/audio/audio_output_resampler.h"
-#include "media/audio/audio_util.h"
-#include "media/audio/fake_audio_input_stream.h"
-#include "media/audio/fake_audio_output_stream.h"
-#include "media/audio/virtual_audio_input_stream.h"
-#include "media/audio/virtual_audio_output_stream.h"
-#include "media/base/media_switches.h"
-
-// TODO(dalecurtis): Temporarily disabled while switching pipeline to use float,
-// http://crbug.com/114700
-#if defined(ENABLE_AUDIO_MIXER)
-#include "media/audio/audio_output_mixer.h"
-#endif
-
-namespace media {
-
-static const int kStreamCloseDelaySeconds = 5;
-
-// Default maximum number of output streams that can be open simultaneously
-// for all platforms.
-static const int kDefaultMaxOutputStreams = 16;
-
-// Default maximum number of input streams that can be open simultaneously
-// for all platforms.
-static const int kDefaultMaxInputStreams = 16;
-
-static const int kMaxInputChannels = 2;
-
-const char AudioManagerBase::kDefaultDeviceName[] = "Default";
-const char AudioManagerBase::kDefaultDeviceId[] = "default";
-
-AudioManagerBase::AudioManagerBase()
- : num_active_input_streams_(0),
- max_num_output_streams_(kDefaultMaxOutputStreams),
- max_num_input_streams_(kDefaultMaxInputStreams),
- num_output_streams_(0),
- num_input_streams_(0),
- audio_thread_(new base::Thread("AudioThread")),
- virtual_audio_input_stream_(NULL) {
-#if defined(OS_WIN)
- audio_thread_->init_com_with_mta(true);
-#endif
- CHECK(audio_thread_->Start());
- message_loop_ = audio_thread_->message_loop_proxy();
-}
-
-AudioManagerBase::~AudioManagerBase() {
- // The platform specific AudioManager implementation must have already
- // stopped the audio thread. Otherwise, we may destroy audio streams before
-  // stopping the thread, resulting in unexpected behavior.
-  // This way we make sure all activities of the audio streams are stopped
- // before we destroy them.
- CHECK(!audio_thread_.get());
- // All the output streams should have been deleted.
- DCHECK_EQ(0, num_output_streams_);
- // All the input streams should have been deleted.
- DCHECK_EQ(0, num_input_streams_);
-}
-
-string16 AudioManagerBase::GetAudioInputDeviceModel() {
- return string16();
-}
-
-scoped_refptr<base::MessageLoopProxy> AudioManagerBase::GetMessageLoop() {
- return message_loop_;
-}
-
-AudioOutputStream* AudioManagerBase::MakeAudioOutputStream(
- const AudioParameters& params) {
- if (!params.IsValid()) {
- DLOG(ERROR) << "Audio parameters are invalid";
- return NULL;
- }
-
- // Limit the number of audio streams opened. This is to prevent using
- // excessive resources for a large number of audio streams. More
- // importantly it prevents instability on certain systems.
- // See bug: http://crbug.com/30242.
- if (num_output_streams_ >= max_num_output_streams_) {
- DLOG(ERROR) << "Number of opened output audio streams "
- << num_output_streams_
- << " exceed the max allowed number "
- << max_num_output_streams_;
- return NULL;
- }
-
- // If there are no audio output devices we should use a FakeAudioOutputStream
- // to ensure video playback continues to work.
- bool audio_output_disabled =
- params.format() == AudioParameters::AUDIO_FAKE ||
- !HasAudioOutputDevices();
-
- AudioOutputStream* stream = NULL;
- if (virtual_audio_input_stream_) {
-#if defined(OS_IOS)
- // We do not currently support iOS. It does not link.
- NOTIMPLEMENTED();
- return NULL;
-#else
- stream = VirtualAudioOutputStream::MakeStream(this, params, message_loop_,
- virtual_audio_input_stream_);
-#endif
- } else if (audio_output_disabled) {
- stream = FakeAudioOutputStream::MakeFakeStream(this, params);
- } else if (params.format() == AudioParameters::AUDIO_PCM_LINEAR) {
- stream = MakeLinearOutputStream(params);
- } else if (params.format() == AudioParameters::AUDIO_PCM_LOW_LATENCY) {
- stream = MakeLowLatencyOutputStream(params);
- }
-
- if (stream)
- ++num_output_streams_;
-
- return stream;
-}
-
-AudioInputStream* AudioManagerBase::MakeAudioInputStream(
- const AudioParameters& params, const std::string& device_id) {
- if (!params.IsValid() || (params.channels() > kMaxInputChannels) ||
- device_id.empty()) {
- DLOG(ERROR) << "Audio parameters are invalid for device " << device_id;
- return NULL;
- }
-
- if (num_input_streams_ >= max_num_input_streams_) {
- DLOG(ERROR) << "Number of opened input audio streams "
- << num_input_streams_
- << " exceed the max allowed number " << max_num_input_streams_;
- return NULL;
- }
-
- AudioInputStream* stream = NULL;
- if (params.format() == AudioParameters::AUDIO_VIRTUAL) {
-#if defined(OS_IOS)
- // We do not currently support iOS.
- NOTIMPLEMENTED();
- return NULL;
-#else
- // TODO(justinlin): Currently, audio mirroring will only work for the first
- // request. Subsequent requests will not get audio.
- if (!virtual_audio_input_stream_) {
- virtual_audio_input_stream_ =
- VirtualAudioInputStream::MakeStream(this, params, message_loop_);
- stream = virtual_audio_input_stream_;
- DVLOG(1) << "Virtual audio input stream created.";
-
- // Make all current output streams recreate themselves as
- // VirtualAudioOutputStreams that will attach to the above
- // VirtualAudioInputStream.
- message_loop_->PostTask(FROM_HERE, base::Bind(
- &AudioManagerBase::NotifyAllOutputDeviceChangeListeners,
- base::Unretained(this)));
- } else {
- stream = NULL;
- }
-#endif
- } else if (params.format() == AudioParameters::AUDIO_FAKE) {
- stream = FakeAudioInputStream::MakeFakeStream(this, params);
- } else if (params.format() == AudioParameters::AUDIO_PCM_LINEAR) {
- stream = MakeLinearInputStream(params, device_id);
- } else if (params.format() == AudioParameters::AUDIO_PCM_LOW_LATENCY) {
- stream = MakeLowLatencyInputStream(params, device_id);
- }
-
- if (stream)
- ++num_input_streams_;
-
- return stream;
-}
-
-AudioOutputStream* AudioManagerBase::MakeAudioOutputStreamProxy(
- const AudioParameters& params) {
-#if defined(OS_IOS)
- // IOS implements audio input only.
- NOTIMPLEMENTED();
- return NULL;
-#else
- DCHECK(message_loop_->BelongsToCurrentThread());
-
- bool use_audio_output_resampler =
- !CommandLine::ForCurrentProcess()->HasSwitch(
- switches::kDisableAudioOutputResampler) &&
- params.format() == AudioParameters::AUDIO_PCM_LOW_LATENCY;
-
- // If we're not using AudioOutputResampler our output parameters are the same
- // as our input parameters.
- AudioParameters output_params = params;
- if (use_audio_output_resampler) {
- output_params = GetPreferredLowLatencyOutputStreamParameters(params);
-
- // Ensure we only pass on valid output parameters.
- if (!output_params.IsValid()) {
- // We've received invalid audio output parameters, so switch to a mock
- // output device based on the input parameters. This may happen if the OS
- // provided us junk values for the hardware configuration.
- LOG(ERROR) << "Invalid audio output parameters received; using fake "
- << "audio path. Channels: " << output_params.channels() << ", "
- << "Sample Rate: " << output_params.sample_rate() << ", "
- << "Bits Per Sample: " << output_params.bits_per_sample()
- << ", Frames Per Buffer: "
- << output_params.frames_per_buffer();
-
- // Tell the AudioManager to create a fake output device.
- output_params = AudioParameters(
- AudioParameters::AUDIO_FAKE, params.channel_layout(),
- params.sample_rate(), params.bits_per_sample(),
- params.frames_per_buffer());
- }
- }
-
- std::pair<AudioParameters, AudioParameters> dispatcher_key =
- std::make_pair(params, output_params);
- AudioOutputDispatchersMap::iterator it =
- output_dispatchers_.find(dispatcher_key);
- if (it != output_dispatchers_.end())
- return new AudioOutputProxy(it->second);
-
- base::TimeDelta close_delay =
- base::TimeDelta::FromSeconds(kStreamCloseDelaySeconds);
-
- if (use_audio_output_resampler &&
- output_params.format() != AudioParameters::AUDIO_FAKE) {
- scoped_refptr<AudioOutputDispatcher> dispatcher =
- new AudioOutputResampler(this, params, output_params, close_delay);
- output_dispatchers_[dispatcher_key] = dispatcher;
- return new AudioOutputProxy(dispatcher);
- }
-
-#if defined(ENABLE_AUDIO_MIXER)
- // TODO(dalecurtis): Browser side mixing has a couple issues that must be
- // fixed before it can be turned on by default: http://crbug.com/138098 and
- // http://crbug.com/140247
- if (cmd_line->HasSwitch(switches::kEnableAudioMixer)) {
- scoped_refptr<AudioOutputDispatcher> dispatcher =
- new AudioOutputMixer(this, params, close_delay);
- output_dispatchers_[dispatcher_key] = dispatcher;
- return new AudioOutputProxy(dispatcher);
- }
-#endif
-
- scoped_refptr<AudioOutputDispatcher> dispatcher =
- new AudioOutputDispatcherImpl(this, output_params, close_delay);
- output_dispatchers_[dispatcher_key] = dispatcher;
- return new AudioOutputProxy(dispatcher);
-#endif // defined(OS_IOS)
-}
-
-bool AudioManagerBase::CanShowAudioInputSettings() {
- return false;
-}
-
-void AudioManagerBase::ShowAudioInputSettings() {
-}
-
-void AudioManagerBase::GetAudioInputDeviceNames(
- media::AudioDeviceNames* device_names) {
-}
-
-void AudioManagerBase::ReleaseOutputStream(AudioOutputStream* stream) {
- DCHECK(stream);
- // TODO(xians) : Have a clearer destruction path for the AudioOutputStream.
- // For example, pass the ownership to AudioManager so it can delete the
- // streams.
- num_output_streams_--;
- delete stream;
-}
-
-void AudioManagerBase::ReleaseInputStream(AudioInputStream* stream) {
- DCHECK(stream);
- // TODO(xians) : Have a clearer destruction path for the AudioInputStream.
-
- if (virtual_audio_input_stream_ == stream) {
- DVLOG(1) << "Virtual audio input stream stopping.";
- virtual_audio_input_stream_->Stop();
- virtual_audio_input_stream_ = NULL;
-
- // Make all VirtualAudioOutputStreams unregister from the
- // VirtualAudioInputStream and recreate themselves as regular audio streams
- // to return sound to hardware.
- NotifyAllOutputDeviceChangeListeners();
- }
-
- num_input_streams_--;
- delete stream;
-}
-
-void AudioManagerBase::IncreaseActiveInputStreamCount() {
- base::AtomicRefCountInc(&num_active_input_streams_);
-}
-
-void AudioManagerBase::DecreaseActiveInputStreamCount() {
- DCHECK(IsRecordingInProcess());
- base::AtomicRefCountDec(&num_active_input_streams_);
-}
-
-bool AudioManagerBase::IsRecordingInProcess() {
- return !base::AtomicRefCountIsZero(&num_active_input_streams_);
-}
-
-void AudioManagerBase::Shutdown() {
- // To avoid running into deadlocks while we stop the thread, shut it down
- // via a local variable while not holding the audio thread lock.
- scoped_ptr<base::Thread> audio_thread;
- {
- base::AutoLock lock(audio_thread_lock_);
- audio_thread_.swap(audio_thread);
- }
-
- if (!audio_thread.get())
- return;
-
- CHECK_NE(MessageLoop::current(), audio_thread->message_loop());
-
- // We must use base::Unretained since Shutdown might have been called from
- // the destructor and we can't alter the refcount of the object at that point.
- audio_thread->message_loop()->PostTask(FROM_HERE, base::Bind(
- &AudioManagerBase::ShutdownOnAudioThread,
- base::Unretained(this)));
-
- // Stop() will wait for any posted messages to be processed first.
- audio_thread->Stop();
-}
-
-void AudioManagerBase::ShutdownOnAudioThread() {
-// IOS implements audio input only.
-#if defined(OS_IOS)
- return;
-#else
- // This should always be running on the audio thread, but since we've cleared
- // the audio_thread_ member pointer when we get here, we can't verify exactly
- // what thread we're running on. The method is not public though and only
- // called from one place, so we'll leave it at that.
- AudioOutputDispatchersMap::iterator it = output_dispatchers_.begin();
- for (; it != output_dispatchers_.end(); ++it) {
- scoped_refptr<AudioOutputDispatcher>& dispatcher = (*it).second;
- if (dispatcher) {
- dispatcher->Shutdown();
- // All AudioOutputProxies must have been freed before Shutdown is called.
- // If they still exist, things will go bad. They have direct pointers to
- // both physical audio stream objects that belong to the dispatcher as
- // well as the message loop of the audio thread that will soon go away.
- // So, better crash now than later.
- DCHECK(dispatcher->HasOneRef()) << "AudioOutputProxies are still alive";
- dispatcher = NULL;
- }
- }
-
- output_dispatchers_.clear();
-#endif // defined(OS_IOS)
-}
-
-AudioParameters AudioManagerBase::GetPreferredLowLatencyOutputStreamParameters(
- const AudioParameters& input_params) {
-#if defined(OS_IOS)
- // IOS implements audio input only.
- NOTIMPLEMENTED();
- return AudioParameters();
-#else
- // TODO(dalecurtis): This should include bits per channel and channel layout
- // eventually.
- return AudioParameters(
- AudioParameters::AUDIO_PCM_LOW_LATENCY, input_params.channel_layout(),
- GetAudioHardwareSampleRate(), 16, GetAudioHardwareBufferSize());
-#endif // defined(OS_IOS)
-}
-
-void AudioManagerBase::AddOutputDeviceChangeListener(
- AudioDeviceListener* listener) {
- DCHECK(message_loop_->BelongsToCurrentThread());
- output_listeners_.AddObserver(listener);
-}
-
-void AudioManagerBase::RemoveOutputDeviceChangeListener(
- AudioDeviceListener* listener) {
- DCHECK(message_loop_->BelongsToCurrentThread());
- output_listeners_.RemoveObserver(listener);
-}
-
-void AudioManagerBase::NotifyAllOutputDeviceChangeListeners() {
- DCHECK(message_loop_->BelongsToCurrentThread());
- DVLOG(1) << "Firing OnDeviceChange() notifications.";
- FOR_EACH_OBSERVER(AudioDeviceListener, output_listeners_, OnDeviceChange());
-}
-
-} // namespace media
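MakeAudioOutputStreamProxy() above caches one AudioOutputDispatcher per (input params, output params) pair, so repeated requests with identical parameters return distinct lightweight proxies that share a single underlying physical stream, kept open briefly by the close delay across start/stop cycles. Illustratively (this must run on the audio thread, per the DCHECK), something like:

    // Illustrative only. Both proxies are distinct AudioOutputStream objects,
    // but with identical parameters they are backed by the same cached
    // dispatcher, so the physical device stream is shared and opened lazily.
    void TwoProxiesShareOneDispatcher(media::AudioManager* audio_manager,
                                      const media::AudioParameters& params) {
      media::AudioOutputStream* proxy_a =
          audio_manager->MakeAudioOutputStreamProxy(params);
      media::AudioOutputStream* proxy_b =
          audio_manager->MakeAudioOutputStreamProxy(params);
      // Each proxy is opened, started, and closed independently.
      proxy_a->Close();
      proxy_b->Close();
    }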
diff --git a/src/media/audio/audio_manager_base.h b/src/media/audio/audio_manager_base.h
deleted file mode 100644
index 83ad98b..0000000
--- a/src/media/audio/audio_manager_base.h
+++ /dev/null
@@ -1,173 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef MEDIA_AUDIO_AUDIO_MANAGER_BASE_H_
-#define MEDIA_AUDIO_AUDIO_MANAGER_BASE_H_
-
-#include <map>
-#include <string>
-#include <utility>
-
-#include "base/atomic_ref_count.h"
-#include "base/compiler_specific.h"
-#include "base/memory/scoped_ptr.h"
-#include "base/observer_list.h"
-#include "base/synchronization/lock.h"
-#include "media/audio/audio_manager.h"
-
-#if defined(OS_WIN)
-#include "base/win/scoped_com_initializer.h"
-#endif
-
-namespace base {
-class Thread;
-}
-
-namespace media {
-
-class AudioOutputDispatcher;
-class VirtualAudioInputStream;
-
-// AudioManagerBase provides AudioManager functions common for all platforms.
-class MEDIA_EXPORT AudioManagerBase : public AudioManager {
- public:
- // Name of the generic "default" device.
- static const char kDefaultDeviceName[];
- // Unique Id of the generic "default" device.
- static const char kDefaultDeviceId[];
-
- virtual ~AudioManagerBase();
-
- virtual scoped_refptr<base::MessageLoopProxy> GetMessageLoop() OVERRIDE;
-
- virtual string16 GetAudioInputDeviceModel() OVERRIDE;
-
- virtual bool CanShowAudioInputSettings() OVERRIDE;
- virtual void ShowAudioInputSettings() OVERRIDE;
-
- virtual void GetAudioInputDeviceNames(
- media::AudioDeviceNames* device_names) OVERRIDE;
-
- virtual AudioOutputStream* MakeAudioOutputStream(
- const AudioParameters& params) OVERRIDE;
-
- virtual AudioInputStream* MakeAudioInputStream(
- const AudioParameters& params, const std::string& device_id) OVERRIDE;
-
- virtual AudioOutputStream* MakeAudioOutputStreamProxy(
- const AudioParameters& params) OVERRIDE;
-
- virtual bool IsRecordingInProcess() OVERRIDE;
-
- // Called internally by the audio stream when it has been closed.
- virtual void ReleaseOutputStream(AudioOutputStream* stream);
- virtual void ReleaseInputStream(AudioInputStream* stream);
-
- void IncreaseActiveInputStreamCount();
- void DecreaseActiveInputStreamCount();
-
- // Creates the output stream for the |AUDIO_PCM_LINEAR| format. The legacy
- // name is also from |AUDIO_PCM_LINEAR|.
- virtual AudioOutputStream* MakeLinearOutputStream(
- const AudioParameters& params) = 0;
-
- // Creates the output stream for the |AUDIO_PCM_LOW_LATENCY| format.
- virtual AudioOutputStream* MakeLowLatencyOutputStream(
- const AudioParameters& params) = 0;
-
- // Creates the input stream for the |AUDIO_PCM_LINEAR| format. The legacy
- // name is also from |AUDIO_PCM_LINEAR|.
- virtual AudioInputStream* MakeLinearInputStream(
- const AudioParameters& params, const std::string& device_id) = 0;
-
- // Creates the input stream for the |AUDIO_PCM_LOW_LATENCY| format.
- virtual AudioInputStream* MakeLowLatencyInputStream(
- const AudioParameters& params, const std::string& device_id) = 0;
-
- // Returns the preferred hardware audio output parameters for opening output
- // streams in the |AUDIO_PCM_LOW_LATENCY| format.
- // TODO(dalecurtis): Retrieve the |channel_layout| value from hardware instead
- // of accepting the value.
- // TODO(dalecurtis): Each AudioManager should implement their own version, see
- // http://crbug.com/137326
- virtual AudioParameters GetPreferredLowLatencyOutputStreamParameters(
- const AudioParameters& input_params);
-
- // Listeners will be notified on the AudioManager::GetMessageLoop() loop.
- virtual void AddOutputDeviceChangeListener(
- AudioDeviceListener* listener) OVERRIDE;
- virtual void RemoveOutputDeviceChangeListener(
- AudioDeviceListener* listener) OVERRIDE;
-
- protected:
- AudioManagerBase();
-
- // TODO(dalecurtis): This must change to map both input and output parameters
- // to a single dispatcher, otherwise on a device state change we'll just get
- // the exact same invalid dispatcher.
- typedef std::map<std::pair<AudioParameters, AudioParameters>,
- scoped_refptr<AudioOutputDispatcher> >
- AudioOutputDispatchersMap;
-
- // Shuts down the audio thread and releases all the audio output dispatchers
- // on the audio thread. All audio streams should be freed before Shutdown()
- // is called. This must be called in the destructor of every AudioManagerBase
- // implementation.
- void Shutdown();
-
- void SetMaxOutputStreamsAllowed(int max) { max_num_output_streams_ = max; }
-
- // Called by each platform specific AudioManager to notify output state change
- // listeners that a state change has occurred. Must be called from the audio
- // thread.
- void NotifyAllOutputDeviceChangeListeners();
-
- // Map of cached AudioOutputDispatcher instances. Must only be touched
- // from the audio thread (no locking).
- AudioOutputDispatchersMap output_dispatchers_;
-
- private:
- // Called by Shutdown().
- void ShutdownOnAudioThread();
-
- // Counts the number of active input streams to find out if something else
- // is currently recording in Chrome.
- base::AtomicRefCount num_active_input_streams_;
-
- // Max number of open output streams, modified by
- // SetMaxOutputStreamsAllowed().
- int max_num_output_streams_;
-
- // Max number of open input streams.
- int max_num_input_streams_;
-
- // Number of currently open output streams.
- int num_output_streams_;
-
- // Number of currently open input streams.
- int num_input_streams_;
-
- // Track output state change listeners.
- ObserverList<AudioDeviceListener> output_listeners_;
-
- // Thread used to interact with audio streams created by this audio manager.
- scoped_ptr<base::Thread> audio_thread_;
- mutable base::Lock audio_thread_lock_;
-
- // The message loop of the audio thread this object runs on. Used for internal
- // tasks which run on the audio thread even after Shutdown() has been started
- // and GetMessageLoop() starts returning NULL.
- scoped_refptr<base::MessageLoopProxy> message_loop_;
-
- // Currently active VirtualAudioInputStream. When this is set, we will
- // create all audio output streams as virtual streams so as to redirect audio
- // data to this virtual input stream.
- VirtualAudioInputStream* virtual_audio_input_stream_;
-
- DISALLOW_COPY_AND_ASSIGN(AudioManagerBase);
-};
-
-} // namespace media
-
-#endif // MEDIA_AUDIO_AUDIO_MANAGER_BASE_H_
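Pulling the base-class contract together: a platform implementation derives from AudioManagerBase, implements the remaining pure-virtual device queries and the four stream factories, and must call Shutdown() from its own destructor so the audio thread is gone before ~AudioManagerBase() runs its CHECK. A hypothetical skeleton; AudioManagerFoo and FooOutputStream are illustrative names, with FooOutputStream assumed to be declared elsewhere.

    // Hypothetical skeleton of a platform AudioManager; not from the tree.
    class AudioManagerFoo : public media::AudioManagerBase {
     public:
      AudioManagerFoo() {}
      virtual ~AudioManagerFoo() {
        Shutdown();  // Required before the base-class destructor runs.
      }

      virtual bool HasAudioOutputDevices() OVERRIDE { return true; }
      virtual bool HasAudioInputDevices() OVERRIDE { return false; }

      virtual media::AudioOutputStream* MakeLinearOutputStream(
          const media::AudioParameters& params) OVERRIDE {
        // Streams are later freed via ReleaseOutputStream().
        return new FooOutputStream(this, params);
      }
      virtual media::AudioOutputStream* MakeLowLatencyOutputStream(
          const media::AudioParameters& params) OVERRIDE {
        return new FooOutputStream(this, params);
      }
      virtual media::AudioInputStream* MakeLinearInputStream(
          const media::AudioParameters& params,
          const std::string& device_id) OVERRIDE { return NULL; }
      virtual media::AudioInputStream* MakeLowLatencyInputStream(
          const media::AudioParameters& params,
          const std::string& device_id) OVERRIDE { return NULL; }

     private:
      DISALLOW_COPY_AND_ASSIGN(AudioManagerFoo);
    };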
diff --git a/src/media/audio/audio_output_controller.cc b/src/media/audio/audio_output_controller.cc
deleted file mode 100644
index 50850c9..0000000
--- a/src/media/audio/audio_output_controller.cc
+++ /dev/null
@@ -1,400 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "media/audio/audio_output_controller.h"
-
-#include "base/bind.h"
-#include "base/debug/trace_event.h"
-#include "base/message_loop.h"
-#include "base/synchronization/waitable_event.h"
-#include "base/threading/platform_thread.h"
-#include "base/threading/thread_restrictions.h"
-#include "base/time.h"
-#include "build/build_config.h"
-#include "media/audio/shared_memory_util.h"
-
-using base::Time;
-using base::TimeDelta;
-using base::WaitableEvent;
-
-namespace media {
-
-// Polling-related constants.
-const int AudioOutputController::kPollNumAttempts = 3;
-const int AudioOutputController::kPollPauseInMilliseconds = 3;
-
-AudioOutputController::AudioOutputController(AudioManager* audio_manager,
- EventHandler* handler,
- const AudioParameters& params,
- SyncReader* sync_reader)
- : audio_manager_(audio_manager),
- handler_(handler),
- stream_(NULL),
- volume_(1.0),
- state_(kEmpty),
- sync_reader_(sync_reader),
- message_loop_(audio_manager->GetMessageLoop()),
- number_polling_attempts_left_(0),
- params_(params),
- ALLOW_THIS_IN_INITIALIZER_LIST(weak_this_(this)) {
-}
-
-AudioOutputController::~AudioOutputController() {
- DCHECK_EQ(kClosed, state_);
-
- if (message_loop_->BelongsToCurrentThread()) {
- DoStopCloseAndClearStream(NULL);
- } else {
- // http://crbug.com/120973
- base::ThreadRestrictions::ScopedAllowWait allow_wait;
- WaitableEvent completion(true /* manual reset */,
- false /* initial state */);
- message_loop_->PostTask(FROM_HERE,
- base::Bind(&AudioOutputController::DoStopCloseAndClearStream,
- base::Unretained(this),
- &completion));
- completion.Wait();
- }
-}
-
-// static
-scoped_refptr<AudioOutputController> AudioOutputController::Create(
- AudioManager* audio_manager,
- EventHandler* event_handler,
- const AudioParameters& params,
- SyncReader* sync_reader) {
- DCHECK(audio_manager);
- DCHECK(sync_reader);
-
- if (!params.IsValid() || !audio_manager)
- return NULL;
-
- // Starts the audio controller thread.
- scoped_refptr<AudioOutputController> controller(new AudioOutputController(
- audio_manager, event_handler, params, sync_reader));
-
- controller->message_loop_->PostTask(FROM_HERE, base::Bind(
- &AudioOutputController::DoCreate, controller));
-
- return controller;
-}
-
-void AudioOutputController::Play() {
- DCHECK(message_loop_);
- message_loop_->PostTask(FROM_HERE, base::Bind(
- &AudioOutputController::DoPlay, this));
-}
-
-void AudioOutputController::Pause() {
- DCHECK(message_loop_);
- message_loop_->PostTask(FROM_HERE, base::Bind(
- &AudioOutputController::DoPause, this));
-}
-
-void AudioOutputController::Flush() {
- DCHECK(message_loop_);
- message_loop_->PostTask(FROM_HERE, base::Bind(
- &AudioOutputController::DoFlush, this));
-}
-
-void AudioOutputController::Close(const base::Closure& closed_task) {
- DCHECK(!closed_task.is_null());
- DCHECK(message_loop_);
- message_loop_->PostTaskAndReply(FROM_HERE, base::Bind(
- &AudioOutputController::DoClose, this), closed_task);
-}
-
-void AudioOutputController::SetVolume(double volume) {
- DCHECK(message_loop_);
- message_loop_->PostTask(FROM_HERE, base::Bind(
- &AudioOutputController::DoSetVolume, this, volume));
-}
-
-void AudioOutputController::DoCreate() {
- DCHECK(message_loop_->BelongsToCurrentThread());
-
- // Close() can be called before DoCreate() is executed.
- if (state_ == kClosed)
- return;
- DCHECK(state_ == kEmpty || state_ == kRecreating) << state_;
-
- DoStopCloseAndClearStream(NULL);
- stream_ = audio_manager_->MakeAudioOutputStreamProxy(params_);
- if (!stream_) {
- state_ = kError;
-
- // TODO(hclam): Define error types.
- handler_->OnError(this, 0);
- return;
- }
-
- if (!stream_->Open()) {
- state_ = kError;
- DoStopCloseAndClearStream(NULL);
-
- // TODO(hclam): Define error types.
- handler_->OnError(this, 0);
- return;
- }
-
- // Everything started okay, so register for state change callbacks if we have
- // not already done so.
- if (state_ != kRecreating)
- audio_manager_->AddOutputDeviceChangeListener(this);
-
- // We have successfully opened the stream. Set the initial volume.
- stream_->SetVolume(volume_);
-
- // Finally set the state to kCreated.
- State original_state = state_;
- state_ = kCreated;
-
- // And then report we have been created if we haven't done so already.
- if (original_state != kRecreating)
- handler_->OnCreated(this);
-}
-
-void AudioOutputController::DoPlay() {
- DCHECK(message_loop_->BelongsToCurrentThread());
-
- // We can start from created or paused state.
- if (state_ != kCreated && state_ != kPaused) {
- // If a pause is pending drop it. Otherwise the controller might hang since
- // the corresponding play event has already occurred.
- if (state_ == kPausedWhenStarting)
- state_ = kStarting;
- return;
- }
-
- state_ = kStarting;
-
- // Ask for first packet.
- sync_reader_->UpdatePendingBytes(0);
-
-  // We cannot start the stream immediately; we should give the renderer some
-  // time to deliver data.
- // TODO(vrk): The polling here and in WaitTillDataReady() is pretty clunky.
- // Refine the API such that polling is no longer needed. (crbug.com/112196)
- number_polling_attempts_left_ = kPollNumAttempts;
- message_loop_->PostDelayedTask(
- FROM_HERE,
- base::Bind(&AudioOutputController::PollAndStartIfDataReady,
- weak_this_.GetWeakPtr()),
- TimeDelta::FromMilliseconds(kPollPauseInMilliseconds));
-}
-
-void AudioOutputController::PollAndStartIfDataReady() {
- DCHECK(message_loop_->BelongsToCurrentThread());
-
- // Being paranoid: do nothing if state unexpectedly changed.
- if ((state_ != kStarting) && (state_ != kPausedWhenStarting))
- return;
-
- bool pausing = (state_ == kPausedWhenStarting);
- // If we are ready to start the stream, start it.
- // Of course we may have to stop it immediately...
- if (--number_polling_attempts_left_ == 0 ||
- pausing ||
- sync_reader_->DataReady()) {
- StartStream();
- if (pausing) {
- DoPause();
- }
- } else {
- message_loop_->PostDelayedTask(
- FROM_HERE,
- base::Bind(&AudioOutputController::PollAndStartIfDataReady,
- weak_this_.GetWeakPtr()),
- TimeDelta::FromMilliseconds(kPollPauseInMilliseconds));
- }
-}
-
-void AudioOutputController::StartStream() {
- DCHECK(message_loop_->BelongsToCurrentThread());
- state_ = kPlaying;
-
- // We start the AudioOutputStream lazily.
- stream_->Start(this);
-
- // Tell the event handler that we are now playing.
- handler_->OnPlaying(this);
-}
-
-void AudioOutputController::DoPause() {
- DCHECK(message_loop_->BelongsToCurrentThread());
-
- if (stream_) {
- // Then we stop the audio device. This is not the perfect solution
-    // because it discards all the internal buffers in the audio device.
- // TODO(hclam): Actually pause the audio device.
- stream_->Stop();
- }
-
- switch (state_) {
- case kStarting:
-      // We were asked to pause while starting. There is a delayed task that
-      // will try to start playback, and there is no way to remove that task
-      // from the queue. If we stop now, that task will be executed anyway.
-      // Delay pausing and let the delayed task do the pause after it starts
-      // playback.
- state_ = kPausedWhenStarting;
- break;
- case kPlaying:
- state_ = kPaused;
-
- // Send a special pause mark to the low-latency audio thread.
- sync_reader_->UpdatePendingBytes(kPauseMark);
-
- handler_->OnPaused(this);
- break;
- default:
- return;
- }
-}
-
-void AudioOutputController::DoFlush() {
- DCHECK(message_loop_->BelongsToCurrentThread());
-
- // TODO(hclam): Actually flush the audio device.
-}
-
-void AudioOutputController::DoClose() {
- DCHECK(message_loop_->BelongsToCurrentThread());
-
- if (state_ != kClosed) {
- DoStopCloseAndClearStream(NULL);
- sync_reader_->Close();
- state_ = kClosed;
- }
-}
-
-void AudioOutputController::DoSetVolume(double volume) {
- DCHECK(message_loop_->BelongsToCurrentThread());
-
-  // Save the volume to a member first. We may not be able to set the volume
-  // right away, but we will set it when the stream is created.
- volume_ = volume;
-
- switch (state_) {
- case kCreated:
- case kStarting:
- case kPausedWhenStarting:
- case kPlaying:
- case kPaused:
- stream_->SetVolume(volume_);
- break;
- default:
- return;
- }
-}
-
-void AudioOutputController::DoReportError(int code) {
- DCHECK(message_loop_->BelongsToCurrentThread());
- if (state_ != kClosed)
- handler_->OnError(this, code);
-}
-
-int AudioOutputController::OnMoreData(AudioBus* dest,
- AudioBuffersState buffers_state) {
- return OnMoreIOData(NULL, dest, buffers_state);
-}
-
-int AudioOutputController::OnMoreIOData(AudioBus* source,
- AudioBus* dest,
- AudioBuffersState buffers_state) {
- TRACE_EVENT0("audio", "AudioOutputController::OnMoreIOData");
-
- {
- // Check state and do nothing if we are not playing.
- // We are on the hardware audio thread, so lock is needed.
- base::AutoLock auto_lock(lock_);
- if (state_ != kPlaying) {
- return 0;
- }
- }
-
- int frames = sync_reader_->Read(source, dest);
- sync_reader_->UpdatePendingBytes(
- buffers_state.total_bytes() + frames * params_.GetBytesPerFrame());
- return frames;
-}
-
-void AudioOutputController::WaitTillDataReady() {
-#if defined(OS_WIN) || defined(OS_MACOSX)
- base::Time start = base::Time::Now();
- // Wait for up to 1.5 seconds for DataReady(). 1.5 seconds was chosen because
- // it's larger than the playback time of the WaveOut buffer size using the
- // minimum supported sample rate: 4096 / 3000 = ~1.4 seconds. Even a client
- // expecting real time playout should be able to fill in this time.
- const base::TimeDelta max_wait = base::TimeDelta::FromMilliseconds(1500);
- while (!sync_reader_->DataReady() &&
- ((base::Time::Now() - start) < max_wait)) {
- base::PlatformThread::YieldCurrentThread();
- }
-#else
- // WaitTillDataReady() is deprecated and should not be used.
- CHECK(false);
-#endif
-}
-
-void AudioOutputController::OnError(AudioOutputStream* stream, int code) {
- // Handle error on the audio controller thread.
- message_loop_->PostTask(FROM_HERE, base::Bind(
- &AudioOutputController::DoReportError, this, code));
-}
-
-void AudioOutputController::DoStopCloseAndClearStream(WaitableEvent* done) {
- DCHECK(message_loop_->BelongsToCurrentThread());
-
- // Allow calling unconditionally and bail if we don't have a stream_ to close.
- if (stream_) {
- stream_->Stop();
- stream_->Close();
- stream_ = NULL;
-
- audio_manager_->RemoveOutputDeviceChangeListener(this);
- audio_manager_ = NULL;
-
- weak_this_.InvalidateWeakPtrs();
- }
-
- // Should be last in the method, do not touch "this" from here on.
- if (done)
- done->Signal();
-}
-
-void AudioOutputController::OnDeviceChange() {
- DCHECK(message_loop_->BelongsToCurrentThread());
-
- // We should always have a stream by this point.
- CHECK(stream_);
-
- // Preserve the original state and shutdown the stream.
- State original_state = state_;
- stream_->Stop();
- stream_->Close();
- stream_ = NULL;
-
- // Recreate the stream, exit if we ran into an error.
- state_ = kRecreating;
- DoCreate();
- if (!stream_ || state_ == kError)
- return;
-
- // Get us back to the original state or an equivalent state.
- switch (original_state) {
- case kStarting:
- case kPlaying:
- DoPlay();
- return;
- case kCreated:
- case kPausedWhenStarting:
- case kPaused:
- // From the outside these three states are equivalent.
- return;
- default:
- NOTREACHED() << "Invalid original state.";
- }
-}
-
-} // namespace media
diff --git a/src/media/audio/audio_output_controller.h b/src/media/audio/audio_output_controller.h
deleted file mode 100644
index 762a948..0000000
--- a/src/media/audio/audio_output_controller.h
+++ /dev/null
@@ -1,240 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef MEDIA_AUDIO_AUDIO_OUTPUT_CONTROLLER_H_
-#define MEDIA_AUDIO_AUDIO_OUTPUT_CONTROLLER_H_
-
-#include "base/callback.h"
-#include "base/memory/ref_counted.h"
-#include "base/memory/weak_ptr.h"
-#include "base/synchronization/lock.h"
-#include "media/audio/audio_buffers_state.h"
-#include "media/audio/audio_io.h"
-#include "media/audio/audio_manager.h"
-#include "media/audio/simple_sources.h"
-#include "media/base/media_export.h"
-
-namespace base {
-class WaitableEvent;
-} // namespace base
-
-class MessageLoop;
-
-// An AudioOutputController controls an AudioOutputStream and provides data
-// to this output stream. Importantly, it executes audio operations such as
-// play, pause, and stop on a separate thread, namely the audio manager
-// thread.
-//
-// All the public methods of AudioOutputController are non-blocking.
-// The actual operations are performed on the audio manager thread.
-//
-// Here is a state diagram for the AudioOutputController:
-//
-// .-----------------------> [ Closed / Error ] <------.
-// | ^ |
-// | | |
-// [ Created ] --> [ Starting ] --> [ Playing ] --> [ Paused ]
-// ^ | ^ | ^
-// | | | | |
-// | | `----------------' |
-// | V |
-// | [ PausedWhenStarting ] ------------------------'
-// |
-// *[ Empty ]
-//
-// * Initial state
-//
-// At any time after reaching the Created state but before Closed / Error, the
-// AudioOutputController may be notified of a device change via OnDeviceChange()
-// and transition to the Recreating state. If OnDeviceChange() completes
-// successfully the state will transition back to an equivalent pre-call state.
-// E.g., if the state was Paused or PausedWhenStarting, the new state will be
-// Created, since these states are all functionally equivalent and require a
-// Play() call to continue to the next state.
-//
-// The AudioOutputStream can request data from the AudioOutputController via the
-// AudioSourceCallback interface. AudioOutputController uses the SyncReader
-// passed to it via construction to synchronously fulfill this read request.
-//
-// Since AudioOutputController uses AudioManager's message loop, the controller
-// uses WeakPtr to allow safe cancellation of pending tasks.
-//
-
-namespace media {
-
-class MEDIA_EXPORT AudioOutputController
- : public base::RefCountedThreadSafe<AudioOutputController>,
- public AudioOutputStream::AudioSourceCallback,
- NON_EXPORTED_BASE(public AudioManager::AudioDeviceListener) {
- public:
- // An event handler that receives events from the AudioOutputController. The
- // following methods are called on the audio manager thread.
- class MEDIA_EXPORT EventHandler {
- public:
- virtual void OnCreated(AudioOutputController* controller) = 0;
- virtual void OnPlaying(AudioOutputController* controller) = 0;
- virtual void OnPaused(AudioOutputController* controller) = 0;
- virtual void OnError(AudioOutputController* controller, int error_code) = 0;
-
- protected:
- virtual ~EventHandler() {}
- };
-
- // A synchronous reader interface used by AudioOutputController for
- // synchronous reading.
- // TODO(crogers): find a better name for this class and the Read() method
- // now that it can handle synchronized I/O.
- class SyncReader {
- public:
- virtual ~SyncReader() {}
-
- // Notify the synchronous reader of the number of bytes in the
- // AudioOutputController that have not yet been played. This is used by
- // SyncReader to prepare more data and perform synchronization.
- virtual void UpdatePendingBytes(uint32 bytes) = 0;
-
- // Attempt to completely fill |dest|, return the actual number of
- // frames that could be read.
- // |source| may optionally be provided for input data.
- virtual int Read(AudioBus* source, AudioBus* dest) = 0;
-
- // Close this synchronous reader.
- virtual void Close() = 0;
-
- // Check if data is ready.
- virtual bool DataReady() = 0;
- };
-
- // Factory method for creating an AudioOutputController.
- // This also creates and opens an AudioOutputStream on the audio manager
- // thread, and if this is successful, the |event_handler| will receive an
- // OnCreated() call from the same audio manager thread. |audio_manager| must
- // outlive AudioOutputController.
- static scoped_refptr<AudioOutputController> Create(
- AudioManager* audio_manager, EventHandler* event_handler,
- const AudioParameters& params, SyncReader* sync_reader);
-
- // Methods to control playback of the stream.
-
- // Starts the playback of this audio output stream.
- void Play();
-
- // Pause this audio output stream.
- void Pause();
-
- // Discard all audio data buffered in this output stream. This method only
- // has effect when the stream is paused.
- void Flush();
-
- // Closes the audio output stream. The state is changed and the resources
- // are freed on the audio manager thread. closed_task is executed after that.
- // Callbacks (EventHandler and SyncReader) must exist until closed_task is
- // called.
- //
- // It is safe to call this method more than once. Calls after the first one
- // will have no effect.
- void Close(const base::Closure& closed_task);
-
- // Sets the volume of the audio output stream.
- void SetVolume(double volume);
-
- // AudioSourceCallback implementation.
- virtual int OnMoreData(AudioBus* dest,
- AudioBuffersState buffers_state) OVERRIDE;
- virtual int OnMoreIOData(AudioBus* source,
- AudioBus* dest,
- AudioBuffersState buffers_state) OVERRIDE;
- virtual void OnError(AudioOutputStream* stream, int code) OVERRIDE;
- virtual void WaitTillDataReady() OVERRIDE;
-
- // AudioDeviceListener implementation. When called AudioOutputController will
- // shutdown the existing |stream_|, transition to the kRecreating state,
- // create a new stream, and then transition back to an equivalent state prior
- // to being called.
- virtual void OnDeviceChange() OVERRIDE;
-
- protected:
- // Internal state of the source.
- enum State {
- kEmpty,
- kCreated,
- kPlaying,
- kStarting,
- kPausedWhenStarting,
- kPaused,
- kClosed,
- kError,
- kRecreating,
- };
-
- friend class base::RefCountedThreadSafe<AudioOutputController>;
- virtual ~AudioOutputController();
-
- private:
- // We poll the sync reader to check whether data has become available.
- static const int kPollNumAttempts;
- static const int kPollPauseInMilliseconds;
-
- AudioOutputController(AudioManager* audio_manager, EventHandler* handler,
- const AudioParameters& params, SyncReader* sync_reader);
-
- // The following methods are executed on the audio manager thread.
- void DoCreate();
- void DoPlay();
- void PollAndStartIfDataReady();
- void DoPause();
- void DoFlush();
- void DoClose();
- void DoSetVolume(double volume);
- void DoReportError(int code);
-
- // Helper method that starts the physical stream.
- void StartStream();
-
- // Helper method that stops, closes, and NULLs |*stream_|.
- // Signals |done| when finished, if |done| is not NULL.
- void DoStopCloseAndClearStream(base::WaitableEvent *done);
-
- AudioManager* audio_manager_;
-
- // |handler_| may be called only if |state_| is not kClosed.
- EventHandler* handler_;
- AudioOutputStream* stream_;
-
- // The current volume of the audio stream.
- double volume_;
-
- // |state_| is written on the audio manager thread and read on the hardware
- // audio thread. These accesses must be protected by |lock_|, but no lock is
- // required for reads on the audio manager thread.
- State state_;
-
- // The |lock_| must be acquired whenever we access |state_| from a thread
- // other than the audio manager thread.
- base::Lock lock_;
-
- // SyncReader is used only in low latency mode for synchronous reading.
- SyncReader* sync_reader_;
-
- // The message loop of audio manager thread that this object runs on.
- scoped_refptr<base::MessageLoopProxy> message_loop_;
-
- // When starting the stream we wait for data to become available. This is
- // the number of polling attempts left.
- int number_polling_attempts_left_;
-
- AudioParameters params_;
-
- // Used to post delayed tasks to ourselves that we can cancel.
- // We don't want the tasks to hold onto a reference as it will slow down
- // shutdown and force it to wait for the most delayed task.
- // Also, if we're shutting down, we do not want to poll for more data.
- base::WeakPtrFactory<AudioOutputController> weak_this_;
-
- DISALLOW_COPY_AND_ASSIGN(AudioOutputController);
-};
-
-} // namespace media
-
-#endif // MEDIA_AUDIO_AUDIO_OUTPUT_CONTROLLER_H_
diff --git a/src/media/audio/audio_output_controller_unittest.cc b/src/media/audio/audio_output_controller_unittest.cc
deleted file mode 100644
index fe29ce5..0000000
--- a/src/media/audio/audio_output_controller_unittest.cc
+++ /dev/null
@@ -1,286 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/bind.h"
-#include "base/environment.h"
-#include "base/basictypes.h"
-#include "base/logging.h"
-#include "base/message_loop.h"
-#include "base/synchronization/waitable_event.h"
-#include "media/audio/audio_output_controller.h"
-#include "testing/gmock/include/gmock/gmock.h"
-#include "testing/gtest/include/gtest/gtest.h"
-
-// TODO(vrk): These tests need to be rewritten! (crbug.com/112500)
-
-using ::testing::_;
-using ::testing::AtLeast;
-using ::testing::DoAll;
-using ::testing::Exactly;
-using ::testing::InvokeWithoutArgs;
-using ::testing::NotNull;
-using ::testing::Return;
-
-namespace media {
-
-static const int kSampleRate = AudioParameters::kAudioCDSampleRate;
-static const int kBitsPerSample = 16;
-static const ChannelLayout kChannelLayout = CHANNEL_LAYOUT_STEREO;
-static const int kSamplesPerPacket = kSampleRate / 10;
-static const int kHardwareBufferSize = kSamplesPerPacket *
- ChannelLayoutToChannelCount(kChannelLayout) * kBitsPerSample / 8;
-
-class MockAudioOutputControllerEventHandler
- : public AudioOutputController::EventHandler {
- public:
- MockAudioOutputControllerEventHandler() {}
-
- MOCK_METHOD1(OnCreated, void(AudioOutputController* controller));
- MOCK_METHOD1(OnPlaying, void(AudioOutputController* controller));
- MOCK_METHOD1(OnPaused, void(AudioOutputController* controller));
- MOCK_METHOD2(OnError, void(AudioOutputController* controller,
- int error_code));
-
- private:
- DISALLOW_COPY_AND_ASSIGN(MockAudioOutputControllerEventHandler);
-};
-
-class MockAudioOutputControllerSyncReader
- : public AudioOutputController::SyncReader {
- public:
- MockAudioOutputControllerSyncReader() {}
-
- MOCK_METHOD1(UpdatePendingBytes, void(uint32 bytes));
- MOCK_METHOD2(Read, int(AudioBus* source, AudioBus* dest));
- MOCK_METHOD0(Close, void());
- MOCK_METHOD0(DataReady, bool());
-
- private:
- DISALLOW_COPY_AND_ASSIGN(MockAudioOutputControllerSyncReader);
-};
-
-ACTION_P(SignalEvent, event) {
- event->Signal();
-}
-
-// Custom action to clear a memory buffer.
-ACTION(ClearBuffer) {
- arg1->Zero();
-}
-
-// Closes AudioOutputController synchronously.
-static void CloseAudioController(AudioOutputController* controller) {
- controller->Close(MessageLoop::QuitClosure());
- MessageLoop::current()->Run();
-}
-
-class AudioOutputControllerTest : public testing::Test {
- public:
- AudioOutputControllerTest() {}
- virtual ~AudioOutputControllerTest() {}
-
- protected:
- MessageLoopForIO message_loop_;
-
- private:
- DISALLOW_COPY_AND_ASSIGN(AudioOutputControllerTest);
-};
-
-TEST_F(AudioOutputControllerTest, CreateAndClose) {
- scoped_ptr<AudioManager> audio_manager(AudioManager::Create());
- if (!audio_manager->HasAudioOutputDevices())
- return;
-
- MockAudioOutputControllerEventHandler event_handler;
-
- EXPECT_CALL(event_handler, OnCreated(NotNull()))
- .Times(1);
-
- MockAudioOutputControllerSyncReader sync_reader;
- EXPECT_CALL(sync_reader, Close());
-
- AudioParameters params(AudioParameters::AUDIO_PCM_LINEAR, kChannelLayout,
- kSampleRate, kBitsPerSample, kSamplesPerPacket);
- scoped_refptr<AudioOutputController> controller =
- AudioOutputController::Create(
- audio_manager.get(), &event_handler, params, &sync_reader);
- ASSERT_TRUE(controller.get());
-
- // Close the controller immediately.
- CloseAudioController(controller);
-}
-
-TEST_F(AudioOutputControllerTest, PlayPauseClose) {
- scoped_ptr<AudioManager> audio_manager(AudioManager::Create());
- if (!audio_manager->HasAudioOutputDevices())
- return;
-
- MockAudioOutputControllerEventHandler event_handler;
- base::WaitableEvent event(false, false);
- base::WaitableEvent pause_event(false, false);
-
- // If OnCreated is called then signal the event.
- EXPECT_CALL(event_handler, OnCreated(NotNull()))
- .WillOnce(InvokeWithoutArgs(&event, &base::WaitableEvent::Signal));
-
- // OnPlaying() will be called only once.
- EXPECT_CALL(event_handler, OnPlaying(NotNull()));
-
- MockAudioOutputControllerSyncReader sync_reader;
- EXPECT_CALL(sync_reader, UpdatePendingBytes(_))
- .Times(AtLeast(2));
- EXPECT_CALL(sync_reader, Read(_, _))
- .WillRepeatedly(DoAll(ClearBuffer(), SignalEvent(&event),
- Return(4)));
- EXPECT_CALL(sync_reader, DataReady())
- .WillRepeatedly(Return(true));
- EXPECT_CALL(event_handler, OnPaused(NotNull()))
- .WillOnce(InvokeWithoutArgs(&pause_event, &base::WaitableEvent::Signal));
- EXPECT_CALL(sync_reader, Close());
-
- AudioParameters params(AudioParameters::AUDIO_PCM_LINEAR, kChannelLayout,
- kSampleRate, kBitsPerSample, kSamplesPerPacket);
- scoped_refptr<AudioOutputController> controller =
- AudioOutputController::Create(
- audio_manager.get(), &event_handler, params, &sync_reader);
- ASSERT_TRUE(controller.get());
-
- // Wait for OnCreated() to be called.
- event.Wait();
-
- ASSERT_FALSE(pause_event.IsSignaled());
- controller->Play();
- controller->Pause();
- pause_event.Wait();
-
- // Now stop the controller.
- CloseAudioController(controller);
-}
-
-TEST_F(AudioOutputControllerTest, HardwareBufferTooLarge) {
- scoped_ptr<AudioManager> audio_manager(AudioManager::Create());
- if (!audio_manager->HasAudioOutputDevices())
- return;
-
- // Create an audio device with a very large hardware buffer size.
- MockAudioOutputControllerEventHandler event_handler;
-
- MockAudioOutputControllerSyncReader sync_reader;
- AudioParameters params(AudioParameters::AUDIO_PCM_LINEAR, kChannelLayout,
- kSampleRate, kBitsPerSample,
- kSamplesPerPacket * 1000);
- scoped_refptr<AudioOutputController> controller =
- AudioOutputController::Create(
- audio_manager.get(), &event_handler, params, &sync_reader);
-
- // Use an ASSERT because we never stop the device and assume that it
- // cannot be created.
- ASSERT_FALSE(controller);
-}
-
-TEST_F(AudioOutputControllerTest, PlayPausePlayClose) {
- scoped_ptr<AudioManager> audio_manager(AudioManager::Create());
- if (!audio_manager->HasAudioOutputDevices())
- return;
-
- MockAudioOutputControllerEventHandler event_handler;
- base::WaitableEvent event(false, false);
- EXPECT_CALL(event_handler, OnCreated(NotNull()))
- .WillOnce(InvokeWithoutArgs(&event, &base::WaitableEvent::Signal));
-
- // OnPlaying() will be called only once.
- base::WaitableEvent play_event(false, false);
- EXPECT_CALL(event_handler, OnPlaying(NotNull()))
- .WillOnce(InvokeWithoutArgs(&play_event, &base::WaitableEvent::Signal));
-
- // OnPaused() should never be called since the pause during kStarting is
- // dropped when the second play comes in.
- EXPECT_CALL(event_handler, OnPaused(NotNull()))
- .Times(0);
-
- MockAudioOutputControllerSyncReader sync_reader;
- EXPECT_CALL(sync_reader, UpdatePendingBytes(_))
- .Times(AtLeast(1));
- EXPECT_CALL(sync_reader, Read(_, _))
- .WillRepeatedly(DoAll(ClearBuffer(), SignalEvent(&event), Return(4)));
- EXPECT_CALL(sync_reader, DataReady())
- .WillRepeatedly(Return(true));
- EXPECT_CALL(sync_reader, Close());
-
- AudioParameters params(AudioParameters::AUDIO_PCM_LINEAR, kChannelLayout,
- kSampleRate, kBitsPerSample, kSamplesPerPacket);
- scoped_refptr<AudioOutputController> controller =
- AudioOutputController::Create(
- audio_manager.get(), &event_handler, params, &sync_reader);
- ASSERT_TRUE(controller.get());
-
- // Wait for OnCreated() to be called.
- event.Wait();
-
- ASSERT_FALSE(play_event.IsSignaled());
- controller->Play();
- controller->Pause();
- controller->Play();
- play_event.Wait();
-
- // Now stop the controller.
- CloseAudioController(controller);
-}
-
-// Ensure state change events are handled.
-TEST_F(AudioOutputControllerTest, PlayStateChangeClose) {
- scoped_ptr<AudioManager> audio_manager(AudioManager::Create());
- if (!audio_manager->HasAudioOutputDevices())
- return;
-
- MockAudioOutputControllerEventHandler event_handler;
- base::WaitableEvent event(false, false);
- EXPECT_CALL(event_handler, OnCreated(NotNull()))
- .WillOnce(InvokeWithoutArgs(&event, &base::WaitableEvent::Signal));
-
- // OnPlaying() will be called once normally and once after being recreated.
- base::WaitableEvent play_event(false, false);
- EXPECT_CALL(event_handler, OnPlaying(NotNull()))
- .Times(2)
- .WillRepeatedly(InvokeWithoutArgs(
- &play_event, &base::WaitableEvent::Signal));
-
- // OnPaused() should not be called during the state change event.
- EXPECT_CALL(event_handler, OnPaused(NotNull()))
- .Times(0);
-
- MockAudioOutputControllerSyncReader sync_reader;
- EXPECT_CALL(sync_reader, UpdatePendingBytes(_))
- .Times(AtLeast(1));
- EXPECT_CALL(sync_reader, Read(_, _))
- .WillRepeatedly(DoAll(ClearBuffer(), SignalEvent(&event), Return(4)));
- EXPECT_CALL(sync_reader, DataReady())
- .WillRepeatedly(Return(true));
- EXPECT_CALL(sync_reader, Close());
-
- AudioParameters params(AudioParameters::AUDIO_PCM_LINEAR, kChannelLayout,
- kSampleRate, kBitsPerSample, kSamplesPerPacket);
- scoped_refptr<AudioOutputController> controller =
- AudioOutputController::Create(
- audio_manager.get(), &event_handler, params, &sync_reader);
- ASSERT_TRUE(controller.get());
-
- // Wait for OnCreated() to be called.
- event.Wait();
-
- ASSERT_FALSE(play_event.IsSignaled());
- controller->Play();
- play_event.Wait();
-
- // Force a state change and wait for the stream to come back to playing state.
- play_event.Reset();
- audio_manager->GetMessageLoop()->PostTask(FROM_HERE,
- base::Bind(&AudioOutputController::OnDeviceChange, controller));
- play_event.Wait();
-
- // Now stop the controller.
- CloseAudioController(controller);
-}
-
-} // namespace media
diff --git a/src/media/audio/audio_output_device.cc b/src/media/audio/audio_output_device.cc
deleted file mode 100644
index 8ad93ff..0000000
--- a/src/media/audio/audio_output_device.cc
+++ /dev/null
@@ -1,345 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "media/audio/audio_output_device.h"
-
-#include "base/debug/trace_event.h"
-#include "base/message_loop.h"
-#include "base/threading/thread_restrictions.h"
-#include "base/time.h"
-#include "media/audio/audio_output_controller.h"
-#include "media/audio/audio_util.h"
-#include "media/audio/shared_memory_util.h"
-#include "media/base/limits.h"
-
-namespace media {
-
-// Takes care of invoking the render callback on the audio thread.
-// An instance of this class is created for each output stream in
-// OnStreamCreated().
-class AudioOutputDevice::AudioThreadCallback
- : public AudioDeviceThread::Callback {
- public:
- AudioThreadCallback(const AudioParameters& audio_parameters,
- int input_channels,
- base::SharedMemoryHandle memory,
- int memory_length,
- AudioRendererSink::RenderCallback* render_callback);
- virtual ~AudioThreadCallback();
-
- virtual void MapSharedMemory() OVERRIDE;
-
- // Called whenever we receive notifications about pending data.
- virtual void Process(int pending_data) OVERRIDE;
-
- private:
- AudioRendererSink::RenderCallback* render_callback_;
- scoped_ptr<AudioBus> input_bus_;
- scoped_ptr<AudioBus> output_bus_;
- DISALLOW_COPY_AND_ASSIGN(AudioThreadCallback);
-};
-
-AudioOutputDevice::AudioOutputDevice(
- AudioOutputIPC* ipc,
- const scoped_refptr<base::MessageLoopProxy>& io_loop)
- : ScopedLoopObserver(io_loop),
- input_channels_(0),
- callback_(NULL),
- ipc_(ipc),
- state_(IDLE),
- play_on_start_(true),
- stopping_hack_(false) {
- CHECK(ipc_);
- stream_id_ = ipc_->AddDelegate(this);
-}
-
-void AudioOutputDevice::Initialize(const AudioParameters& params,
- RenderCallback* callback) {
- DCHECK(!callback_) << "Calling Initialize() twice?";
- audio_parameters_ = params;
- callback_ = callback;
-}
-
-void AudioOutputDevice::InitializeIO(const AudioParameters& params,
- int input_channels,
- RenderCallback* callback) {
- DCHECK_GE(input_channels, 0);
- DCHECK_LT(input_channels, limits::kMaxChannels);
- input_channels_ = input_channels;
- Initialize(params, callback);
-}
-
-AudioOutputDevice::~AudioOutputDevice() {
- // The current design requires that the user calls Stop() before deleting
- // this class.
- DCHECK(audio_thread_.IsStopped());
-
- if (ipc_)
- ipc_->RemoveDelegate(stream_id_);
-}
-
-void AudioOutputDevice::Start() {
- DCHECK(callback_) << "Initialize hasn't been called";
- message_loop()->PostTask(FROM_HERE,
- base::Bind(&AudioOutputDevice::CreateStreamOnIOThread, this,
- audio_parameters_, input_channels_));
-}
-
-void AudioOutputDevice::Stop() {
- {
- base::AutoLock auto_lock(audio_thread_lock_);
- audio_thread_.Stop(MessageLoop::current());
- stopping_hack_ = true;
- }
-
- message_loop()->PostTask(FROM_HERE,
- base::Bind(&AudioOutputDevice::ShutDownOnIOThread, this));
-}
-
-void AudioOutputDevice::Play() {
- message_loop()->PostTask(FROM_HERE,
- base::Bind(&AudioOutputDevice::PlayOnIOThread, this));
-}
-
-void AudioOutputDevice::Pause(bool flush) {
- message_loop()->PostTask(FROM_HERE,
- base::Bind(&AudioOutputDevice::PauseOnIOThread, this, flush));
-}
-
-bool AudioOutputDevice::SetVolume(double volume) {
- if (volume < 0 || volume > 1.0)
- return false;
-
- if (!message_loop()->PostTask(FROM_HERE,
- base::Bind(&AudioOutputDevice::SetVolumeOnIOThread, this, volume))) {
- return false;
- }
-
- return true;
-}
-
-void AudioOutputDevice::CreateStreamOnIOThread(const AudioParameters& params,
- int input_channels) {
- DCHECK(message_loop()->BelongsToCurrentThread());
- if (state_ == IDLE) {
- state_ = CREATING_STREAM;
- ipc_->CreateStream(stream_id_, params, input_channels);
- }
-}
-
-void AudioOutputDevice::PlayOnIOThread() {
- DCHECK(message_loop()->BelongsToCurrentThread());
- if (state_ == PAUSED) {
- ipc_->PlayStream(stream_id_);
- state_ = PLAYING;
- play_on_start_ = false;
- } else {
- play_on_start_ = true;
- }
-}
-
-void AudioOutputDevice::PauseOnIOThread(bool flush) {
- DCHECK(message_loop()->BelongsToCurrentThread());
- if (state_ == PLAYING) {
- ipc_->PauseStream(stream_id_);
- if (flush)
- ipc_->FlushStream(stream_id_);
- state_ = PAUSED;
- } else {
- // Note that |flush| isn't relevant here since this is the case where
- // the stream is first starting.
- }
- play_on_start_ = false;
-}
-
-void AudioOutputDevice::ShutDownOnIOThread() {
- DCHECK(message_loop()->BelongsToCurrentThread());
-
- // Make sure we don't call shutdown more than once.
- if (state_ >= CREATING_STREAM) {
- ipc_->CloseStream(stream_id_);
- state_ = IDLE;
- }
-
- // We can run into an issue where ShutDownOnIOThread is called right after
- // OnStreamCreated is called in cases where Start/Stop are called before we
- // get the OnStreamCreated callback. To handle that corner case, we call
- // Stop(). In most cases, the thread will already be stopped.
- //
- // Another situation is when the IO thread goes away before Stop() is called
- // in which case, we cannot use the message loop to close the thread handle
- // and can't rely on the main thread existing either.
- base::AutoLock auto_lock_(audio_thread_lock_);
- base::ThreadRestrictions::ScopedAllowIO allow_io;
- audio_thread_.Stop(NULL);
- audio_callback_.reset();
- stopping_hack_ = false;
-}
-
-void AudioOutputDevice::SetVolumeOnIOThread(double volume) {
- DCHECK(message_loop()->BelongsToCurrentThread());
- if (state_ >= CREATING_STREAM)
- ipc_->SetVolume(stream_id_, volume);
-}
-
-void AudioOutputDevice::OnStateChanged(AudioOutputIPCDelegate::State state) {
- DCHECK(message_loop()->BelongsToCurrentThread());
-
- // Do nothing if the stream has been closed.
- if (state_ < CREATING_STREAM)
- return;
-
- if (state == AudioOutputIPCDelegate::kError) {
- DLOG(WARNING) << "AudioOutputDevice::OnStateChanged(kError)";
- // Don't dereference the callback object if the audio thread
- // is stopped or stopping. That could mean that the callback
- // object has been deleted.
- // TODO(tommi): Add an explicit contract for clearing the callback
- // object. Possibly require calling Initialize again or provide
- // a callback object via Start() and clear it in Stop().
- if (!audio_thread_.IsStopped())
- callback_->OnRenderError();
- }
-}
-
-void AudioOutputDevice::OnStreamCreated(
- base::SharedMemoryHandle handle,
- base::SyncSocket::Handle socket_handle,
- int length) {
- DCHECK(message_loop()->BelongsToCurrentThread());
-#if defined(OS_WIN)
- DCHECK(handle);
- DCHECK(socket_handle);
-#elif defined(__LB_SHELL__) || defined(COBALT)
- DCHECK(handle.get());
-#else
- DCHECK_GE(handle.fd, 0);
- DCHECK_GE(socket_handle, 0);
-#endif
-
- if (state_ != CREATING_STREAM)
- return;
-
- // We can receive OnStreamCreated() on the IO thread after the client has
- // called Stop() but before ShutDownOnIOThread() is processed. In such a
- // situation |callback_| might point to freed memory. Instead of starting
- // |audio_thread_| do nothing and wait for ShutDownOnIOThread() to get called.
- //
- // TODO(scherkus): The real fix is to have sane ownership semantics. The fact
- // that |callback_| (which should own and outlive this object!) can point to
- // freed memory is a mess. AudioRendererSink should be non-refcounted so that
- // owners (WebRtcAudioDeviceImpl, AudioRendererImpl, etc...) can Stop() and
- // delete as they see fit. AudioOutputDevice should internally use WeakPtr
- // to handle teardown and thread hopping. See http://crbug.com/151051 for
- // details.
- base::AutoLock auto_lock(audio_thread_lock_);
- if (stopping_hack_)
- return;
-
- DCHECK(audio_thread_.IsStopped());
- audio_callback_.reset(new AudioOutputDevice::AudioThreadCallback(
- audio_parameters_, input_channels_, handle, length, callback_));
- audio_thread_.Start(audio_callback_.get(), socket_handle,
- "AudioOutputDevice");
- state_ = PAUSED;
-
- // We handle the case where Play() and/or Pause() may have been called
- // multiple times before OnStreamCreated() gets called.
- if (play_on_start_)
- PlayOnIOThread();
-}
-
-void AudioOutputDevice::OnIPCClosed() {
- DCHECK(message_loop()->BelongsToCurrentThread());
- state_ = IPC_CLOSED;
- ipc_ = NULL;
-}
-
-void AudioOutputDevice::WillDestroyCurrentMessageLoop() {
- LOG(ERROR) << "IO loop going away before the audio device has been stopped";
- ShutDownOnIOThread();
-}
-
-// AudioOutputDevice::AudioThreadCallback
-
-AudioOutputDevice::AudioThreadCallback::AudioThreadCallback(
- const AudioParameters& audio_parameters,
- int input_channels,
- base::SharedMemoryHandle memory,
- int memory_length,
- AudioRendererSink::RenderCallback* render_callback)
- : AudioDeviceThread::Callback(audio_parameters,
- input_channels,
- memory,
- memory_length),
- render_callback_(render_callback) {
-}
-
-AudioOutputDevice::AudioThreadCallback::~AudioThreadCallback() {
-}
-
-void AudioOutputDevice::AudioThreadCallback::MapSharedMemory() {
- shared_memory_.Map(TotalSharedMemorySizeInBytes(memory_length_));
-
- // Calculate output and input memory size.
- int output_memory_size = AudioBus::CalculateMemorySize(audio_parameters_);
- int frames = audio_parameters_.frames_per_buffer();
- int input_memory_size =
- AudioBus::CalculateMemorySize(input_channels_, frames);
-
- int io_size = output_memory_size + input_memory_size;
-
- DCHECK_EQ(memory_length_, io_size);
-
- output_bus_ =
- AudioBus::WrapMemory(audio_parameters_, shared_memory_.memory());
-
- if (input_channels_ > 0) {
- // The input data is after the output data.
- char* input_data =
- static_cast<char*>(shared_memory_.memory()) + output_memory_size;
- input_bus_ =
- AudioBus::WrapMemory(input_channels_, frames, input_data);
- }
-}
-
-// Called whenever we receive notifications about pending data.
-void AudioOutputDevice::AudioThreadCallback::Process(int pending_data) {
- if (pending_data == kPauseMark) {
- memset(shared_memory_.memory(), 0, memory_length_);
- SetActualDataSizeInBytes(&shared_memory_, memory_length_, 0);
- return;
- }
-
- // Convert the number of pending bytes in the render buffer
- // into milliseconds.
- int audio_delay_milliseconds = pending_data / bytes_per_ms_;
-
- TRACE_EVENT0("audio", "AudioOutputDevice::FireRenderCallback");
-
- // Update the audio-delay measurement, then ask the client to render audio.
- // Since |output_bus_| wraps the shared memory, the Render() call writes
- // directly into the shared memory.
- size_t num_frames = audio_parameters_.frames_per_buffer();
-
- if (input_bus_.get() && input_channels_ > 0) {
- render_callback_->RenderIO(input_bus_.get(),
- output_bus_.get(),
- audio_delay_milliseconds);
- } else {
- num_frames = render_callback_->Render(output_bus_.get(),
- audio_delay_milliseconds);
- }
-
- // Let the host know we are done.
- // TODO(dalecurtis): Technically this is not always correct. Due to channel
- // padding for alignment, there may be more data available than this. We're
- // relying on AudioSyncReader::Read() to parse this with that in mind. Rename
- // these methods to Set/GetActualFrameCount().
- SetActualDataSizeInBytes(
- &shared_memory_, memory_length_,
- num_frames * sizeof(*output_bus_->channel(0)) * output_bus_->channels());
-}
-
-} // namespace media.
diff --git a/src/media/audio/audio_output_device.h b/src/media/audio/audio_output_device.h
deleted file mode 100644
index 6650028..0000000
--- a/src/media/audio/audio_output_device.h
+++ /dev/null
@@ -1,185 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-//
-// Audio rendering unit utilizing an audio output stream provided by the
-// browser process through IPC.
-//
-// Relationship of classes.
-//
-// AudioOutputController AudioOutputDevice
-// ^ ^
-// | |
-// v IPC v
-// AudioRendererHost <---------> AudioOutputIPC (AudioMessageFilter)
-//
-// Transportation of audio samples from the render to the browser process
-// is done by using shared memory in combination with a sync socket pair
-// to generate a low latency transport. The AudioOutputDevice user registers an
-// AudioOutputDevice::RenderCallback at construction and will be polled by the
-// AudioOutputDevice for audio to be played out by the underlying audio layers.
-//
-// State sequences.
-//
-// Task [IO thread] IPC [IO thread]
-//
-// Start -> CreateStreamOnIOThread -----> CreateStream ------>
-// <- OnStreamCreated <- AudioMsg_NotifyStreamCreated <-
-// ---> PlayOnIOThread -----------> PlayStream -------->
-//
-// Optionally Play() / Pause() sequences may occur:
-// Play -> PlayOnIOThread --------------> PlayStream --------->
-// Pause -> PauseOnIOThread ------------> PauseStream -------->
-// (note that Play() / Pause() sequences before OnStreamCreated are
-// deferred until OnStreamCreated, with the last valid state being used)
-//
-// AudioOutputDevice::Render => audio transport on audio thread =>
-// |
-// Stop --> ShutDownOnIOThread --------> CloseStream -> Close
-//
-// This class utilizes several threads during its lifetime, namely:
-// 1. Creating thread.
-// Must be the main render thread.
-// 2. Control thread (may be the main render thread or another thread).
-// The methods: Start(), Stop(), Play(), Pause(), SetVolume()
-// must be called on the same thread.
-// 3. IO thread (internal implementation detail - not exposed to public API)
-// The thread on which this class receives all the IPC messages; IPC
-// communication can only happen on this thread.
-// 4. Audio transport thread (See AudioDeviceThread).
-// Responsible for calling the AudioThreadCallback implementation that in
-// turn calls AudioRendererSink::RenderCallback which feeds audio samples to
-// the audio layer in the browser process using sync sockets and shared
-// memory.
-//
-// Implementation notes:
-// - The user must call Stop() before deleting the class instance.
-
-#ifndef MEDIA_AUDIO_AUDIO_OUTPUT_DEVICE_H_
-#define MEDIA_AUDIO_AUDIO_OUTPUT_DEVICE_H_
-
-#include "base/basictypes.h"
-#include "base/bind.h"
-#include "base/memory/scoped_ptr.h"
-#include "base/message_loop.h"
-#include "base/shared_memory.h"
-#include "media/base/media_export.h"
-#include "media/audio/audio_device_thread.h"
-#include "media/audio/audio_output_ipc.h"
-#include "media/audio/audio_parameters.h"
-#include "media/audio/scoped_loop_observer.h"
-#include "media/base/audio_renderer_sink.h"
-
-namespace media {
-
-class MEDIA_EXPORT AudioOutputDevice
- : NON_EXPORTED_BASE(public AudioRendererSink),
- public AudioOutputIPCDelegate,
- NON_EXPORTED_BASE(public ScopedLoopObserver) {
- public:
- // AudioRendererSink implementation.
- virtual void Initialize(const AudioParameters& params,
- RenderCallback* callback) OVERRIDE;
- virtual void InitializeIO(const AudioParameters& params,
- int input_channels,
- RenderCallback* callback) OVERRIDE;
- virtual void Start() OVERRIDE;
- virtual void Stop() OVERRIDE;
- virtual void Play() OVERRIDE;
- virtual void Pause(bool flush) OVERRIDE;
- virtual bool SetVolume(double volume) OVERRIDE;
-
- // Methods called on IO thread ----------------------------------------------
- // AudioOutputIPCDelegate methods.
- virtual void OnStateChanged(AudioOutputIPCDelegate::State state) OVERRIDE;
- virtual void OnStreamCreated(base::SharedMemoryHandle handle,
- base::SyncSocket::Handle socket_handle,
- int length) OVERRIDE;
- virtual void OnIPCClosed() OVERRIDE;
-
- // Creates an uninitialized AudioOutputDevice. Clients must call Initialize()
- // before using.
- AudioOutputDevice(AudioOutputIPC* ipc,
- const scoped_refptr<base::MessageLoopProxy>& io_loop);
-
- protected:
- // Magic required by ref_counted.h to avoid any code deleting the object
- // accidentally while there are references to it.
- friend class base::RefCountedThreadSafe<AudioOutputDevice>;
- virtual ~AudioOutputDevice();
-
- // Accessors for subclasses (via IO thread only).
- int stream_id() const { return stream_id_; }
- AudioOutputIPC* audio_output_ipc() const { return ipc_; }
-
- private:
- // Note: The ordering of members in this enum is critical to correct behavior!
- enum State {
- IPC_CLOSED, // No more IPCs can take place.
- IDLE, // Not started.
- CREATING_STREAM, // Waiting for OnStreamCreated() to be called back.
- PAUSED, // Paused. OnStreamCreated() has been called. Can Play()/Stop().
- PLAYING, // Playing back. Can Pause()/Stop().
- };
-
- // Methods called on IO thread ----------------------------------------------
- // The following methods are tasks posted on the IO thread that need to
- // be executed on that thread. They interact with AudioMessageFilter and
- // send IPC messages on that thread.
- void CreateStreamOnIOThread(const AudioParameters& params,
- int input_channels);
- void PlayOnIOThread();
- void PauseOnIOThread(bool flush);
- void ShutDownOnIOThread();
- void SetVolumeOnIOThread(double volume);
-
- // MessageLoop::DestructionObserver implementation for the IO loop.
- // If the IO loop dies before we do, we shut down the audio thread from here.
- virtual void WillDestroyCurrentMessageLoop() OVERRIDE;
-
- AudioParameters audio_parameters_;
-
- // The number of optional synchronized input channels having the same
- // sample-rate and buffer-size as specified in audio_parameters_.
- int input_channels_;
-
- RenderCallback* callback_;
-
- // A pointer to the IPC layer that takes care of sending requests over to
- // the AudioRendererHost.
- AudioOutputIPC* ipc_;
-
- // Our stream ID on the message filter. Only accessed on the IO thread.
- // Must only be modified on the IO thread.
- int stream_id_;
-
- // Current state (must only be accessed from the IO thread). See comments for
- // State enum above.
- State state_;
-
- // State of Play() / Pause() calls before OnStreamCreated() is called.
- bool play_on_start_;
-
- // Our audio thread callback class. See source file for details.
- class AudioThreadCallback;
-
- // In order to avoid a race between OnStreamCreated and Stop(), we use this
- // guard to control stopping and starting the audio thread.
- base::Lock audio_thread_lock_;
- AudioDeviceThread audio_thread_;
- scoped_ptr<AudioOutputDevice::AudioThreadCallback> audio_callback_;
-
- // Temporary hack to ignore OnStreamCreated() due to the user calling Stop()
- // so we don't start the audio thread pointing to a potentially freed
- // |callback_|.
- //
- // TODO(scherkus): Replace this by changing AudioRendererSink to accept the
- // callback via Start(). See http://crbug.com/151051 for details.
- bool stopping_hack_;
-
- DISALLOW_COPY_AND_ASSIGN(AudioOutputDevice);
-};
-
-} // namespace media
-
-#endif // MEDIA_AUDIO_AUDIO_OUTPUT_DEVICE_H_
diff --git a/src/media/audio/audio_output_device_unittest.cc b/src/media/audio/audio_output_device_unittest.cc
deleted file mode 100644
index 70e2a49..0000000
--- a/src/media/audio/audio_output_device_unittest.cc
+++ /dev/null
@@ -1,303 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include <vector>
-
-#include "base/at_exit.h"
-#include "base/message_loop.h"
-#include "base/process_util.h"
-#include "base/shared_memory.h"
-#include "base/sync_socket.h"
-#include "base/test/test_timeouts.h"
-#include "media/audio/audio_output_device.h"
-#include "media/audio/sample_rates.h"
-#include "media/audio/shared_memory_util.h"
-#include "testing/gmock/include/gmock/gmock.h"
-#include "testing/gmock_mutant.h"
-#include "testing/gtest/include/gtest/gtest.h"
-
-using base::CancelableSyncSocket;
-using base::SharedMemory;
-using base::SyncSocket;
-using testing::_;
-using testing::DoAll;
-using testing::Invoke;
-using testing::Return;
-using testing::WithArgs;
-using testing::StrictMock;
-using testing::Values;
-
-namespace media {
-
-namespace {
-
-class MockRenderCallback : public AudioRendererSink::RenderCallback {
- public:
- MockRenderCallback() {}
- virtual ~MockRenderCallback() {}
-
- MOCK_METHOD2(Render, int(AudioBus* dest, int audio_delay_milliseconds));
- MOCK_METHOD3(RenderIO, void(AudioBus* source,
- AudioBus* dest,
- int audio_delay_milliseconds));
- MOCK_METHOD0(OnRenderError, void());
-};
-
-class MockAudioOutputIPC : public AudioOutputIPC {
- public:
- MockAudioOutputIPC() {}
- virtual ~MockAudioOutputIPC() {}
-
- MOCK_METHOD1(AddDelegate, int(AudioOutputIPCDelegate* delegate));
- MOCK_METHOD1(RemoveDelegate, void(int stream_id));
-
- MOCK_METHOD3(CreateStream,
- void(int stream_id, const AudioParameters& params, int input_channels));
- MOCK_METHOD1(PlayStream, void(int stream_id));
- MOCK_METHOD1(CloseStream, void(int stream_id));
- MOCK_METHOD2(SetVolume, void(int stream_id, double volume));
- MOCK_METHOD1(PauseStream, void(int stream_id));
- MOCK_METHOD1(FlushStream, void(int stream_id));
-};
-
-// Creates a copy of a SyncSocket handle that we can give to AudioOutputDevice.
-// On Windows this means duplicating the pipe handle so that AudioOutputDevice
-// can call CloseHandle() (since ownership has been transferred), but on other
-// platforms, we just copy the same socket handle since AudioOutputDevice on
-// those platforms won't actually own the socket (FileDescriptor.auto_close is
-// false).
-bool DuplicateSocketHandle(SyncSocket::Handle socket_handle,
- SyncSocket::Handle* copy) {
-#if defined(OS_WIN)
- HANDLE process = GetCurrentProcess();
- ::DuplicateHandle(process, socket_handle, process, copy,
- 0, FALSE, DUPLICATE_SAME_ACCESS);
- return *copy != NULL;
-#else
- *copy = socket_handle;
- return *copy != -1;
-#endif
-}
-
-ACTION_P2(SendPendingBytes, socket, pending_bytes) {
- socket->Send(&pending_bytes, sizeof(pending_bytes));
-}
-
-// Used to terminate a loop from a different thread than the loop belongs to.
-// |loop| should be a MessageLoopProxy.
-ACTION_P(QuitLoop, loop) {
- loop->PostTask(FROM_HERE, MessageLoop::QuitClosure());
-}
-
-} // namespace.
-
-class AudioOutputDeviceTest
- : public testing::Test,
- public testing::WithParamInterface<bool> {
- public:
- AudioOutputDeviceTest();
- ~AudioOutputDeviceTest();
-
- void StartAudioDevice();
- void CreateStream();
- void ExpectRenderCallback();
- void WaitUntilRenderCallback();
- void StopAudioDevice();
-
- protected:
- // Used to clean up TLS pointers that the test(s) will initialize.
- // Must remain the first member of this class.
- base::ShadowingAtExitManager at_exit_manager_;
- MessageLoopForIO io_loop_;
- const AudioParameters default_audio_parameters_;
- StrictMock<MockRenderCallback> callback_;
- StrictMock<MockAudioOutputIPC> audio_output_ipc_;
- scoped_refptr<AudioOutputDevice> audio_device_;
-
- private:
- int CalculateMemorySize();
-
- const bool synchronized_io_;
- const int input_channels_;
- SharedMemory shared_memory_;
- CancelableSyncSocket browser_socket_;
- CancelableSyncSocket renderer_socket_;
-
- DISALLOW_COPY_AND_ASSIGN(AudioOutputDeviceTest);
-};
-
-static const int kStreamId = 123;
-
-int AudioOutputDeviceTest::CalculateMemorySize() {
- // Calculate output and input memory size.
- int output_memory_size =
- AudioBus::CalculateMemorySize(default_audio_parameters_);
-
- int frames = default_audio_parameters_.frames_per_buffer();
- int input_memory_size =
- AudioBus::CalculateMemorySize(input_channels_, frames);
-
- int io_buffer_size = output_memory_size + input_memory_size;
-
- // This is where it gets a bit hacky. The shared memory contract between
- // AudioOutputDevice and its browser-side counterpart includes a bit more
- // than just the audio data, so we must call TotalSharedMemorySizeInBytes()
- // to get the actual size needed to fit the audio data plus the extra data.
- return TotalSharedMemorySizeInBytes(io_buffer_size);
-}
-
-AudioOutputDeviceTest::AudioOutputDeviceTest()
- : default_audio_parameters_(AudioParameters::AUDIO_PCM_LINEAR,
- CHANNEL_LAYOUT_STEREO,
- 48000, 16, 1024),
- synchronized_io_(GetParam()),
- input_channels_(synchronized_io_ ? 2 : 0) {
- EXPECT_CALL(audio_output_ipc_, AddDelegate(_))
- .WillOnce(Return(kStreamId));
-
- audio_device_ = new AudioOutputDevice(
- &audio_output_ipc_, io_loop_.message_loop_proxy());
-
- if (synchronized_io_) {
- audio_device_->InitializeIO(default_audio_parameters_,
- input_channels_,
- &callback_);
- } else {
- audio_device_->Initialize(default_audio_parameters_,
- &callback_);
- }
- io_loop_.RunUntilIdle();
-}
-
-AudioOutputDeviceTest::~AudioOutputDeviceTest() {
- EXPECT_CALL(audio_output_ipc_, RemoveDelegate(kStreamId));
-
- audio_device_ = NULL;
-}
-
-void AudioOutputDeviceTest::StartAudioDevice() {
- audio_device_->Start();
-
- EXPECT_CALL(audio_output_ipc_, CreateStream(kStreamId, _, _));
-
- io_loop_.RunUntilIdle();
-}
-
-void AudioOutputDeviceTest::CreateStream() {
- const int kMemorySize = CalculateMemorySize();
-
- ASSERT_TRUE(shared_memory_.CreateAndMapAnonymous(kMemorySize));
- memset(shared_memory_.memory(), 0xff, kMemorySize);
-
- ASSERT_TRUE(CancelableSyncSocket::CreatePair(&browser_socket_,
- &renderer_socket_));
-
- // Create duplicates of the handles we pass to AudioOutputDevice since
- // ownership will be transferred and AudioOutputDevice is responsible for
- // freeing.
- SyncSocket::Handle audio_device_socket = SyncSocket::kInvalidHandle;
- ASSERT_TRUE(DuplicateSocketHandle(renderer_socket_.handle(),
- &audio_device_socket));
- base::SharedMemoryHandle duplicated_memory_handle;
- ASSERT_TRUE(shared_memory_.ShareToProcess(base::GetCurrentProcessHandle(),
- &duplicated_memory_handle));
-
- audio_device_->OnStreamCreated(duplicated_memory_handle, audio_device_socket,
- PacketSizeInBytes(kMemorySize));
- io_loop_.RunUntilIdle();
-}
-
-void AudioOutputDeviceTest::ExpectRenderCallback() {
- // We should get a 'play' notification when we call OnStreamCreated().
- // Respond by asking for some audio data. This should ask our callback
- // to provide some audio data that AudioOutputDevice then writes into the
- // shared memory section.
- const int kMemorySize = CalculateMemorySize();
-
- EXPECT_CALL(audio_output_ipc_, PlayStream(kStreamId))
- .WillOnce(SendPendingBytes(&browser_socket_, kMemorySize));
-
- // We expect calls to our audio renderer callback, which returns the number
- // of frames written to the memory section.
- // Here's the second place where it gets hacky: There's no way for us to
- // know (without using a sleep loop!) when the AudioOutputDevice has finished
- // writing the interleaved audio data into the shared memory section.
- // So, for the sake of this test, we consider the call to Render a sign
- // of success and quit the loop.
- if (synchronized_io_) {
- // For synchronized I/O, we expect RenderIO().
- EXPECT_CALL(callback_, RenderIO(_, _, _))
- .WillOnce(QuitLoop(io_loop_.message_loop_proxy()));
- } else {
- // For output only we expect Render().
- const int kNumberOfFramesToProcess = 0;
- EXPECT_CALL(callback_, Render(_, _))
- .WillOnce(DoAll(
- QuitLoop(io_loop_.message_loop_proxy()),
- Return(kNumberOfFramesToProcess)));
- }
-}
-
-void AudioOutputDeviceTest::WaitUntilRenderCallback() {
- // Don't hang the test if we never get the Render() callback.
- io_loop_.PostDelayedTask(FROM_HERE, MessageLoop::QuitClosure(),
- TestTimeouts::action_timeout());
- io_loop_.Run();
-}
-
-void AudioOutputDeviceTest::StopAudioDevice() {
- audio_device_->Stop();
-
- EXPECT_CALL(audio_output_ipc_, CloseStream(kStreamId));
-
- io_loop_.RunUntilIdle();
-}
-
-TEST_P(AudioOutputDeviceTest, Initialize) {
- // Tests that the object can be constructed, initialized and destructed
- // without having ever been started/stopped.
-}
-
-// Calls Start() followed by an immediate Stop() and checks for the basic
-// message filter messages being sent in that case.
-TEST_P(AudioOutputDeviceTest, StartStop) {
- StartAudioDevice();
- StopAudioDevice();
-}
-
-// AudioOutputDevice supports multiple start/stop sequences.
-TEST_P(AudioOutputDeviceTest, StartStopStartStop) {
- StartAudioDevice();
- StopAudioDevice();
- StartAudioDevice();
- StopAudioDevice();
-}
-
-// Simulate receiving OnStreamCreated() prior to processing ShutDownOnIOThread()
-// on the IO loop.
-TEST_P(AudioOutputDeviceTest, StopBeforeRender) {
- StartAudioDevice();
-
- // Call Stop() but don't run the IO loop yet.
- audio_device_->Stop();
-
- // Expect us to shutdown IPC but not to render anything despite the stream
- // getting created.
- EXPECT_CALL(audio_output_ipc_, CloseStream(kStreamId));
- CreateStream();
-}
-
-// Full test with output only.
-TEST_P(AudioOutputDeviceTest, CreateStream) {
- StartAudioDevice();
- ExpectRenderCallback();
- CreateStream();
- WaitUntilRenderCallback();
- StopAudioDevice();
-}
-
-INSTANTIATE_TEST_CASE_P(Render, AudioOutputDeviceTest, Values(false));
-INSTANTIATE_TEST_CASE_P(RenderIO, AudioOutputDeviceTest, Values(true));
-
-} // namespace media.
diff --git a/src/media/audio/audio_output_dispatcher.cc b/src/media/audio/audio_output_dispatcher.cc
deleted file mode 100644
index bfd3fb8..0000000
--- a/src/media/audio/audio_output_dispatcher.cc
+++ /dev/null
@@ -1,26 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "media/audio/audio_output_dispatcher.h"
-
-#include "base/message_loop.h"
-
-namespace media {
-
-AudioOutputDispatcher::AudioOutputDispatcher(
- AudioManager* audio_manager,
- const AudioParameters& params)
- : audio_manager_(audio_manager),
- message_loop_(MessageLoop::current()),
- params_(params) {
- // We expect to be instantiated on the audio thread. Otherwise the
- // message_loop_ member will point to the wrong message loop!
- DCHECK(audio_manager->GetMessageLoop()->BelongsToCurrentThread());
-}
-
-AudioOutputDispatcher::~AudioOutputDispatcher() {
- DCHECK_EQ(MessageLoop::current(), message_loop_);
-}
-
-} // namespace media
diff --git a/src/media/audio/audio_output_dispatcher.h b/src/media/audio/audio_output_dispatcher.h
deleted file mode 100644
index 6f8d86e..0000000
--- a/src/media/audio/audio_output_dispatcher.h
+++ /dev/null
@@ -1,83 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// AudioOutputDispatcher is a single-threaded base class that dispatches
-// creation and deletion of audio output streams. AudioOutputProxy objects use
-// this class to allocate and recycle actual audio output streams. When playback
-// is started, the proxy calls StartStream() to get an output stream that it
-// uses to play audio. When playback is stopped, the proxy returns the stream
-// back to the dispatcher by calling StopStream().
-//
-// AudioManagerBase creates one specialization of AudioOutputDispatcher on the
-// audio thread for each possible set of audio parameters. I.e., streams with
-// different parameters are managed independently. The AudioOutputDispatcher
-// instance is then deleted on the audio thread when the AudioManager shuts
-// down.
-
-#ifndef MEDIA_AUDIO_AUDIO_OUTPUT_DISPATCHER_H_
-#define MEDIA_AUDIO_AUDIO_OUTPUT_DISPATCHER_H_
-
-#include "base/basictypes.h"
-#include "base/memory/ref_counted.h"
-#include "base/timer.h"
-#include "media/audio/audio_io.h"
-#include "media/audio/audio_manager.h"
-#include "media/audio/audio_parameters.h"
-
-class MessageLoop;
-
-namespace media {
-
-class AudioOutputProxy;
-
-class MEDIA_EXPORT AudioOutputDispatcher
- : public base::RefCountedThreadSafe<AudioOutputDispatcher> {
- public:
- AudioOutputDispatcher(AudioManager* audio_manager,
- const AudioParameters& params);
-
- // Called by AudioOutputProxy to open the stream.
- // Returns false if it fails to open it.
- virtual bool OpenStream() = 0;
-
- // Called by AudioOutputProxy when the stream is started.
- // Uses |callback| to get source data and report errors, if any.
- // Does *not* take ownership of this callback.
- // Returns true if started successfully, false otherwise.
- virtual bool StartStream(AudioOutputStream::AudioSourceCallback* callback,
- AudioOutputProxy* stream_proxy) = 0;
-
- // Called by AudioOutputProxy when the stream is stopped.
- // Ownership of the |stream_proxy| is passed to the dispatcher.
- virtual void StopStream(AudioOutputProxy* stream_proxy) = 0;
-
- // Called by AudioOutputProxy when the volume is set.
- virtual void StreamVolumeSet(AudioOutputProxy* stream_proxy,
- double volume) = 0;
-
- // Called by AudioOutputProxy when the stream is closed.
- virtual void CloseStream(AudioOutputProxy* stream_proxy) = 0;
-
- // Called on the audio thread when the AudioManager is shutting down.
- virtual void Shutdown() = 0;
-
- protected:
- friend class base::RefCountedThreadSafe<AudioOutputDispatcher>;
- friend class AudioOutputProxyTest;
-
- virtual ~AudioOutputDispatcher();
-
- // A no-reference-held pointer (we don't want circular references) back to the
- // AudioManager that owns this object.
- AudioManager* audio_manager_;
- MessageLoop* message_loop_;
- AudioParameters params_;
-
- private:
- DISALLOW_COPY_AND_ASSIGN(AudioOutputDispatcher);
-};
-
-} // namespace media
-
-#endif // MEDIA_AUDIO_AUDIO_OUTPUT_DISPATCHER_H_
diff --git a/src/media/audio/audio_output_dispatcher_impl.cc b/src/media/audio/audio_output_dispatcher_impl.cc
deleted file mode 100644
index d254278..0000000
--- a/src/media/audio/audio_output_dispatcher_impl.cc
+++ /dev/null
@@ -1,202 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "media/audio/audio_output_dispatcher_impl.h"
-
-#include <algorithm>
-
-#include "base/bind.h"
-#include "base/compiler_specific.h"
-#include "base/message_loop.h"
-#include "base/time.h"
-#include "media/audio/audio_io.h"
-#include "media/audio/audio_output_proxy.h"
-#include "media/audio/audio_util.h"
-
-namespace media {
-
-AudioOutputDispatcherImpl::AudioOutputDispatcherImpl(
- AudioManager* audio_manager,
- const AudioParameters& params,
- const base::TimeDelta& close_delay)
- : AudioOutputDispatcher(audio_manager, params),
- pause_delay_(base::TimeDelta::FromMilliseconds(
- 2 * params.frames_per_buffer() *
- base::Time::kMillisecondsPerSecond / params.sample_rate())),
- paused_proxies_(0),
- ALLOW_THIS_IN_INITIALIZER_LIST(weak_this_(this)),
- close_timer_(FROM_HERE,
- close_delay,
- weak_this_.GetWeakPtr(),
- &AudioOutputDispatcherImpl::ClosePendingStreams) {
-}
-
-AudioOutputDispatcherImpl::~AudioOutputDispatcherImpl() {
- DCHECK(proxy_to_physical_map_.empty());
- DCHECK(idle_streams_.empty());
- DCHECK(pausing_streams_.empty());
-}
-
-bool AudioOutputDispatcherImpl::OpenStream() {
- DCHECK_EQ(MessageLoop::current(), message_loop_);
-
- paused_proxies_++;
-
- // Ensure that there is at least one open stream.
- if (idle_streams_.empty() && !CreateAndOpenStream()) {
- paused_proxies_--;
- return false;
- }
-
- close_timer_.Reset();
- return true;
-}
-
-bool AudioOutputDispatcherImpl::StartStream(
- AudioOutputStream::AudioSourceCallback* callback,
- AudioOutputProxy* stream_proxy) {
- DCHECK_EQ(MessageLoop::current(), message_loop_);
-
- if (idle_streams_.empty() && !CreateAndOpenStream())
- return false;
-
- AudioOutputStream* physical_stream = idle_streams_.back();
- DCHECK(physical_stream);
- idle_streams_.pop_back();
-
- DCHECK_GT(paused_proxies_, 0u);
- --paused_proxies_;
-
- close_timer_.Reset();
-
- // Schedule task to allocate streams for other proxies if we need to.
- message_loop_->PostTask(FROM_HERE, base::Bind(
- &AudioOutputDispatcherImpl::OpenTask, weak_this_.GetWeakPtr()));
-
- double volume = 0;
- stream_proxy->GetVolume(&volume);
- physical_stream->SetVolume(volume);
- physical_stream->Start(callback);
- proxy_to_physical_map_[stream_proxy] = physical_stream;
- return true;
-}
-
-void AudioOutputDispatcherImpl::StopStream(AudioOutputProxy* stream_proxy) {
- DCHECK_EQ(MessageLoop::current(), message_loop_);
-
- AudioStreamMap::iterator it = proxy_to_physical_map_.find(stream_proxy);
- DCHECK(it != proxy_to_physical_map_.end());
- AudioOutputStream* physical_stream = it->second;
- proxy_to_physical_map_.erase(it);
-
- physical_stream->Stop();
-
- ++paused_proxies_;
-
- pausing_streams_.push_front(physical_stream);
-
- // Don't recycle the stream until two buffers' worth of time has elapsed.
- message_loop_->PostDelayedTask(
- FROM_HERE,
- base::Bind(&AudioOutputDispatcherImpl::StopStreamTask,
- weak_this_.GetWeakPtr()),
- pause_delay_);
-}
-
-void AudioOutputDispatcherImpl::StreamVolumeSet(AudioOutputProxy* stream_proxy,
- double volume) {
- DCHECK_EQ(MessageLoop::current(), message_loop_);
- AudioStreamMap::iterator it = proxy_to_physical_map_.find(stream_proxy);
- if (it != proxy_to_physical_map_.end()) {
- AudioOutputStream* physical_stream = it->second;
- physical_stream->SetVolume(volume);
- }
-}
-
-void AudioOutputDispatcherImpl::StopStreamTask() {
- DCHECK_EQ(MessageLoop::current(), message_loop_);
-
- if (pausing_streams_.empty())
- return;
-
- AudioOutputStream* stream = pausing_streams_.back();
- pausing_streams_.pop_back();
- idle_streams_.push_back(stream);
- close_timer_.Reset();
-}
-
-void AudioOutputDispatcherImpl::CloseStream(AudioOutputProxy* stream_proxy) {
- DCHECK_EQ(MessageLoop::current(), message_loop_);
-
- while (!pausing_streams_.empty()) {
- idle_streams_.push_back(pausing_streams_.back());
- pausing_streams_.pop_back();
- }
-
- DCHECK_GT(paused_proxies_, 0u);
- paused_proxies_--;
-
- while (idle_streams_.size() > paused_proxies_) {
- idle_streams_.back()->Close();
- idle_streams_.pop_back();
- }
-}
-
-void AudioOutputDispatcherImpl::Shutdown() {
- DCHECK_EQ(MessageLoop::current(), message_loop_);
-
- // Cancel any pending tasks to close paused streams or create new ones.
- weak_this_.InvalidateWeakPtrs();
-
- // No AudioOutputProxy objects should hold a reference to us when we get
- // to this stage.
- DCHECK(HasOneRef()) << "Only the AudioManager should hold a reference";
-
- AudioOutputStreamList::iterator it = idle_streams_.begin();
- for (; it != idle_streams_.end(); ++it)
- (*it)->Close();
- idle_streams_.clear();
-
- it = pausing_streams_.begin();
- for (; it != pausing_streams_.end(); ++it)
- (*it)->Close();
- pausing_streams_.clear();
-}
-
-bool AudioOutputDispatcherImpl::CreateAndOpenStream() {
- DCHECK_EQ(MessageLoop::current(), message_loop_);
- AudioOutputStream* stream = audio_manager_->MakeAudioOutputStream(params_);
- if (!stream)
- return false;
-
- if (!stream->Open()) {
- stream->Close();
- return false;
- }
- idle_streams_.push_back(stream);
- return true;
-}
-
-void AudioOutputDispatcherImpl::OpenTask() {
- DCHECK_EQ(MessageLoop::current(), message_loop_);
- // Make sure that we have at least one stream allocated if there
- // are paused streams.
- if (paused_proxies_ > 0 && idle_streams_.empty() &&
- pausing_streams_.empty()) {
- CreateAndOpenStream();
- }
-
- close_timer_.Reset();
-}
-
-// This method is called by |close_timer_|.
-void AudioOutputDispatcherImpl::ClosePendingStreams() {
- DCHECK_EQ(MessageLoop::current(), message_loop_);
- while (!idle_streams_.empty()) {
- idle_streams_.back()->Close();
- idle_streams_.pop_back();
- }
-}
-
-} // namespace media
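// For reference, the |pause_delay_| initializer in the constructor above works
// out to two buffers' worth of playback time. A standalone sketch of that
// arithmetic, using hypothetical buffer-size and sample-rate values that are
// not taken from this patch:

#include <chrono>
#include <cstdio>

int main() {
  const int frames_per_buffer = 2048;  // hypothetical buffer size in frames
  const int sample_rate = 48000;       // hypothetical sample rate in Hz
  // Same formula as the |pause_delay_| initializer: two buffers of audio, in ms.
  const auto pause_delay = std::chrono::milliseconds(
      2 * frames_per_buffer * 1000 / sample_rate);
  std::printf("pause delay: %lld ms\n",
              static_cast<long long>(pause_delay.count()));  // prints ~85 ms
  return 0;
}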
diff --git a/src/media/audio/audio_output_dispatcher_impl.h b/src/media/audio/audio_output_dispatcher_impl.h
deleted file mode 100644
index 0eaa651..0000000
--- a/src/media/audio/audio_output_dispatcher_impl.h
+++ /dev/null
@@ -1,102 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// AudioOutputDispatcherImpl is an implementation of AudioOutputDispatcher.
-//
-// To avoid opening and closing audio devices more frequently than necessary,
-// each dispatcher has a pool of inactive physical streams. A stream is closed
-// only if it hasn't been used for a certain period of time (specified via the
-// constructor).
-//
-
-#ifndef MEDIA_AUDIO_AUDIO_OUTPUT_DISPATCHER_IMPL_H_
-#define MEDIA_AUDIO_AUDIO_OUTPUT_DISPATCHER_IMPL_H_
-
-#include <list>
-#include <map>
-
-#include "base/basictypes.h"
-#include "base/memory/ref_counted.h"
-#include "base/memory/weak_ptr.h"
-#include "base/timer.h"
-#include "media/audio/audio_io.h"
-#include "media/audio/audio_manager.h"
-#include "media/audio/audio_output_dispatcher.h"
-#include "media/audio/audio_parameters.h"
-
-class MessageLoop;
-
-namespace media {
-
-class AudioOutputProxy;
-
-class MEDIA_EXPORT AudioOutputDispatcherImpl : public AudioOutputDispatcher {
- public:
- // |close_delay| specifies the delay after the stream is paused until
- // the audio device is closed.
- AudioOutputDispatcherImpl(AudioManager* audio_manager,
- const AudioParameters& params,
- const base::TimeDelta& close_delay);
-
- // Opens a new physical stream if there are no pending streams in
- // |idle_streams_|. Do not call Close() or Stop() if this method fails.
- virtual bool OpenStream() OVERRIDE;
-
- // If there are pending streams in |idle_streams_| then it reuses one of
- // them, otherwise creates a new one.
- virtual bool StartStream(AudioOutputStream::AudioSourceCallback* callback,
- AudioOutputProxy* stream_proxy) OVERRIDE;
-
- // Holds the physical stream temporarily in |pausing_streams_| and then
- // adds it to the pool of pending streams (i.e. |idle_streams_|).
- virtual void StopStream(AudioOutputProxy* stream_proxy) OVERRIDE;
-
- virtual void StreamVolumeSet(AudioOutputProxy* stream_proxy,
- double volume) OVERRIDE;
-
- virtual void CloseStream(AudioOutputProxy* stream_proxy) OVERRIDE;
-
- virtual void Shutdown() OVERRIDE;
-
- private:
- typedef std::map<AudioOutputProxy*, AudioOutputStream*> AudioStreamMap;
- friend class base::RefCountedThreadSafe<AudioOutputDispatcherImpl>;
- virtual ~AudioOutputDispatcherImpl();
-
- friend class AudioOutputProxyTest;
-
- // Creates a new physical output stream, opens it and pushes to
- // |idle_streams_|. Returns false if the stream couldn't be created or
- // opened.
- bool CreateAndOpenStream();
-
- // A task scheduled by StartStream(). Opens a new stream and puts
- // it in |idle_streams_|.
- void OpenTask();
-
- // Before a stream is reused, it should sit idle for a bit. This task is
- // called once that time has elapsed.
- void StopStreamTask();
-
- // Called by |close_timer_|. Closes all pending streams.
- void ClosePendingStreams();
-
- base::TimeDelta pause_delay_;
- size_t paused_proxies_;
- typedef std::list<AudioOutputStream*> AudioOutputStreamList;
- AudioOutputStreamList idle_streams_;
- AudioOutputStreamList pausing_streams_;
-
- // Used to post delayed tasks to ourselves that we cancel inside Shutdown().
- base::WeakPtrFactory<AudioOutputDispatcherImpl> weak_this_;
- base::DelayTimer<AudioOutputDispatcherImpl> close_timer_;
-
- AudioStreamMap proxy_to_physical_map_;
-
- DISALLOW_COPY_AND_ASSIGN(AudioOutputDispatcherImpl);
-};
-
-} // namespace media
-
-#endif // MEDIA_AUDIO_AUDIO_OUTPUT_DISPATCHER_IMPL_H_
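// The header above describes the dispatcher's pooling scheme: physical streams
// that are open but not playing sit in an idle list and are closed only when a
// delayed timer fires. A minimal, framework-free sketch of that idea, with a
// hypothetical Stream type standing in for AudioOutputStream and with the
// threading, proxy bookkeeping, and Chromium timer left out:

#include <memory>
#include <utility>
#include <vector>

struct Stream {
  bool Open() { return true; }
  void Close() {}
};

class IdlePool {
 public:
  // Reuse an idle stream if one exists, otherwise open a new one.
  std::unique_ptr<Stream> Acquire() {
    if (!idle_.empty()) {
      std::unique_ptr<Stream> stream = std::move(idle_.back());
      idle_.pop_back();
      return stream;
    }
    auto stream = std::make_unique<Stream>();
    if (!stream->Open())
      return nullptr;
    return stream;
  }

  // A returned stream keeps the device open until the close deadline fires.
  void Release(std::unique_ptr<Stream> stream) {
    idle_.push_back(std::move(stream));
  }

  // In the real code this is driven by |close_timer_| (a base::DelayTimer).
  void OnCloseTimer() {
    for (auto& stream : idle_)
      stream->Close();
    idle_.clear();
  }

 private:
  std::vector<std::unique_ptr<Stream>> idle_;
};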
diff --git a/src/media/audio/audio_output_ipc.cc b/src/media/audio/audio_output_ipc.cc
deleted file mode 100644
index 233a3b8..0000000
--- a/src/media/audio/audio_output_ipc.cc
+++ /dev/null
@@ -1,13 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "media/audio/audio_output_ipc.h"
-
-namespace media {
-
-AudioOutputIPCDelegate::~AudioOutputIPCDelegate() {}
-
-AudioOutputIPC::~AudioOutputIPC() {}
-
-} // namespace media
diff --git a/src/media/audio/audio_output_ipc.h b/src/media/audio/audio_output_ipc.h
deleted file mode 100644
index 8543cdc..0000000
--- a/src/media/audio/audio_output_ipc.h
+++ /dev/null
@@ -1,106 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef MEDIA_AUDIO_AUDIO_OUTPUT_IPC_H_
-#define MEDIA_AUDIO_AUDIO_OUTPUT_IPC_H_
-
-#include "base/shared_memory.h"
-#include "base/sync_socket.h"
-#include "media/audio/audio_parameters.h"
-#include "media/base/media_export.h"
-
-namespace media {
-
-// Contains IPC notifications for the state of the server side
-// (AudioOutputController) audio state changes and when an AudioOutputController
-// has been created. Implemented by AudioOutputDevice.
-class MEDIA_EXPORT AudioOutputIPCDelegate {
- public:
- // Current status of the audio output stream in the browser process. Browser
- // sends information about the current playback state and error to the
- // renderer process using this type.
- enum State {
- kPlaying,
- kPaused,
- kError
- };
-
- // Called when state of an audio stream has changed.
- virtual void OnStateChanged(State state) = 0;
-
- // Called when an audio stream has been created.
- // The shared memory |handle| points to a memory section that's used to
- // transfer audio buffers from the AudioOutputIPCDelegate back to the
- // AudioRendererHost. The implementation of OnStreamCreated takes ownership.
- // The |socket_handle| is used by AudioRendererHost to signal requests for
- // audio data to be written into the shared memory. The AudioOutputIPCDelegate
- // must read from this socket and provide audio whenever data (search for
- // "pending_bytes") is received.
- virtual void OnStreamCreated(base::SharedMemoryHandle handle,
- base::SyncSocket::Handle socket_handle,
- int length) = 0;
-
- // Called when the AudioOutputIPC object is going away and/or when the IPC
- // channel has been closed and no more IPC requests can be made.
- // Implementations must clear any references to the AudioOutputIPC object
- // at this point.
- virtual void OnIPCClosed() = 0;
-
- protected:
- virtual ~AudioOutputIPCDelegate();
-};
-
-// Provides IPC functionality for an AudioOutputDevice. The implementation
-// should asynchronously deliver the messages to an AudioOutputController object
-// (or create one in the case of CreateStream()), that may live in a separate
-// process.
-class MEDIA_EXPORT AudioOutputIPC {
- public:
- // Registers an AudioOutputIPCDelegate and returns a |stream_id| that must
- // be used with all other IPC functions in this interface.
- virtual int AddDelegate(AudioOutputIPCDelegate* delegate) = 0;
-
- // Unregisters a delegate that was previously registered via a call to
- // AddDelegate(). The audio stream should be in a closed state prior to
- // calling this function.
- virtual void RemoveDelegate(int stream_id) = 0;
-
- // Sends a request to create an AudioOutputController object in the peer
- // process, identify it by |stream_id| and configure it to use the specified
- // audio |params| and number of synchronized input channels.
- // Once the stream has been created, the implementation must
- // notify the AudioOutputIPCDelegate by calling its
- // OnStreamCreated() method.
- virtual void CreateStream(int stream_id,
- const AudioParameters& params,
- int input_channels) = 0;
-
- // Starts playing the stream. This should generate a call to
- // AudioOutputController::Play().
- virtual void PlayStream(int stream_id) = 0;
-
- // Pauses an audio stream. This should generate a call to
- // AudioOutputController::Pause().
- virtual void PauseStream(int stream_id) = 0;
-
- // "Flushes" the audio device. This should generate a call to
- // AudioOutputController::Flush().
- // TODO(tommi): This is currently neither implemented nor called. Remove?
- virtual void FlushStream(int stream_id) = 0;
-
- // Closes the audio stream and deletes the matching AudioOutputController
- // instance. Prior to deleting the AudioOutputController object, a call to
- // AudioOutputController::Close must be made.
- virtual void CloseStream(int stream_id) = 0;
-
- // Sets the volume of the audio stream.
- virtual void SetVolume(int stream_id, double volume) = 0;
-
- protected:
- virtual ~AudioOutputIPC();
-};
-
-} // namespace media
-
-#endif // MEDIA_AUDIO_AUDIO_OUTPUT_IPC_H_
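// The interface above implies a fixed per-stream call sequence: register a
// delegate to obtain a |stream_id|, create and control the stream with that
// id, and let state changes flow back through the delegate. A much-reduced,
// self-contained sketch of that delegate pattern; the Ipc and Delegate classes
// below are illustrative stand-ins, not the Chromium types, and the IPC
// transport itself is omitted:

#include <cstdio>

class Delegate {
 public:
  enum State { kPlaying, kPaused, kError };
  virtual ~Delegate() {}
  virtual void OnStateChanged(State state) = 0;
};

class Ipc {
 public:
  // The real interface keys every other call on the id returned here.
  int AddDelegate(Delegate* delegate) {
    delegate_ = delegate;
    return next_id_++;
  }
  void PlayStream(int /*stream_id*/) {
    // In Chromium this round-trips through the browser process.
    delegate_->OnStateChanged(Delegate::kPlaying);
  }
  void PauseStream(int /*stream_id*/) {
    delegate_->OnStateChanged(Delegate::kPaused);
  }
  void RemoveDelegate(int /*stream_id*/) { delegate_ = nullptr; }

 private:
  Delegate* delegate_ = nullptr;
  int next_id_ = 1;
};

class LoggingDelegate : public Delegate {
 public:
  virtual void OnStateChanged(State state) {
    std::printf("state changed: %d\n", static_cast<int>(state));
  }
};

int main() {
  Ipc ipc;
  LoggingDelegate delegate;
  const int stream_id = ipc.AddDelegate(&delegate);
  ipc.PlayStream(stream_id);   // prints "state changed: 0"
  ipc.PauseStream(stream_id);  // prints "state changed: 1"
  ipc.RemoveDelegate(stream_id);
  return 0;
}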
diff --git a/src/media/audio/audio_output_mixer.cc b/src/media/audio/audio_output_mixer.cc
deleted file mode 100644
index edce4ea..0000000
--- a/src/media/audio/audio_output_mixer.cc
+++ /dev/null
@@ -1,248 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "media/audio/audio_output_mixer.h"
-
-#include <algorithm>
-
-#include "base/bind.h"
-#include "base/compiler_specific.h"
-#include "base/message_loop.h"
-#include "base/time.h"
-#include "media/audio/audio_io.h"
-#include "media/audio/audio_output_proxy.h"
-#include "media/audio/audio_util.h"
-
-namespace media {
-
-AudioOutputMixer::AudioOutputMixer(AudioManager* audio_manager,
- const AudioParameters& params,
- const base::TimeDelta& close_delay)
- : AudioOutputDispatcher(audio_manager, params),
- ALLOW_THIS_IN_INITIALIZER_LIST(weak_this_(this)),
- close_timer_(FROM_HERE,
- close_delay,
- weak_this_.GetWeakPtr(),
- &AudioOutputMixer::ClosePhysicalStream),
- pending_bytes_(0) {
- // TODO(enal): align data.
- mixer_data_.reset(new uint8[params_.GetBytesPerBuffer()]);
-}
-
-AudioOutputMixer::~AudioOutputMixer() {
-}
-
-bool AudioOutputMixer::OpenStream() {
- DCHECK_EQ(MessageLoop::current(), message_loop_);
-
- if (physical_stream_.get())
- return true;
- AudioOutputStream* stream = audio_manager_->MakeAudioOutputStream(params_);
- if (!stream)
- return false;
- if (!stream->Open()) {
- stream->Close();
- return false;
- }
- pending_bytes_ = 0; // Just in case.
- physical_stream_.reset(stream);
- close_timer_.Reset();
- return true;
-}
-
-bool AudioOutputMixer::StartStream(
- AudioOutputStream::AudioSourceCallback* callback,
- AudioOutputProxy* stream_proxy) {
- DCHECK_EQ(MessageLoop::current(), message_loop_);
-
- // May need to re-open the physical stream if there are no active proxies
- // and enough time has passed.
- OpenStream();
- if (!physical_stream_.get())
- return false;
-
- double volume = 0.0;
- stream_proxy->GetVolume(&volume);
- bool should_start = proxies_.empty();
- {
- base::AutoLock lock(lock_);
- ProxyData* proxy_data = &proxies_[stream_proxy];
- proxy_data->audio_source_callback = callback;
- proxy_data->volume = volume;
- proxy_data->pending_bytes = 0;
- }
- // We cannot start physical stream under the lock,
- // OnMoreData() would try acquiring it...
- if (should_start) {
- physical_stream_->SetVolume(1.0);
- physical_stream_->Start(this);
- }
- return true;
-}
-
-void AudioOutputMixer::StopStream(AudioOutputProxy* stream_proxy) {
- DCHECK_EQ(MessageLoop::current(), message_loop_);
-
- // Because of a possible deadlock we cannot stop the physical stream under
- // the lock (physical_stream_->Stop() can call OnError(), which acquires the
- // lock to iterate through the proxies), so acquire the lock, update the
- // proxy list, release the lock, and only then stop the physical stream.
- bool stop_physical_stream = false;
- {
- base::AutoLock lock(lock_);
- ProxyMap::iterator it = proxies_.find(stream_proxy);
- if (it != proxies_.end()) {
- proxies_.erase(it);
- stop_physical_stream = proxies_.empty();
- }
- }
- if (physical_stream_.get()) {
- if (stop_physical_stream) {
- physical_stream_->Stop();
- pending_bytes_ = 0; // Just in case.
- }
- close_timer_.Reset();
- }
-}
-
-void AudioOutputMixer::StreamVolumeSet(AudioOutputProxy* stream_proxy,
- double volume) {
- DCHECK_EQ(MessageLoop::current(), message_loop_);
-
- ProxyMap::iterator it = proxies_.find(stream_proxy);
-
- // Do nothing if stream is not currently playing.
- if (it != proxies_.end()) {
- base::AutoLock lock(lock_);
- it->second.volume = volume;
- }
-}
-
-void AudioOutputMixer::CloseStream(AudioOutputProxy* stream_proxy) {
- DCHECK_EQ(MessageLoop::current(), message_loop_);
-
- StopStream(stream_proxy);
-}
-
-void AudioOutputMixer::Shutdown() {
- DCHECK_EQ(MessageLoop::current(), message_loop_);
-
- // Cancel any pending tasks to close physical stream.
- weak_this_.InvalidateWeakPtrs();
-
- while (!proxies_.empty()) {
- CloseStream(proxies_.begin()->first);
- }
- ClosePhysicalStream();
-
- // No AudioOutputProxy objects should hold a reference to us when we get
- // to this stage.
- DCHECK(HasOneRef()) << "Only the AudioManager should hold a reference";
-}
-
-void AudioOutputMixer::ClosePhysicalStream() {
- DCHECK_EQ(MessageLoop::current(), message_loop_);
-
- if (proxies_.empty() && physical_stream_.get() != NULL)
- physical_stream_.release()->Close();
-}
-
-// AudioSourceCallback implementation.
-uint32 AudioOutputMixer::OnMoreData(uint8* dest,
- uint32 max_size,
- AudioBuffersState buffers_state) {
- max_size = std::min(max_size,
- static_cast<uint32>(params_.GetBytesPerBuffer()));
- // TODO(enal): consider getting rid of lock as it is in time-critical code.
- // E.g. swap |proxies_| with local variable, and merge 2 lists
- // at the end. That would speed things up but complicate stopping
- // the stream.
- base::AutoLock lock(lock_);
-
- DCHECK_GE(pending_bytes_, buffers_state.pending_bytes);
- if (proxies_.empty()) {
- pending_bytes_ = buffers_state.pending_bytes;
- return 0;
- }
- uint32 actual_total_size = 0;
- uint32 bytes_per_sample = params_.bits_per_sample() >> 3;
-
- // Go through all the streams, getting data for every one of them
- // and mixing it into the destination.
- // Minor optimization: for the first stream we write data directly into the
- // destination. This way we don't have to mix the data when there is only one
- // active stream, and it is a net win in other cases, too.
- bool first_stream = true;
- uint8* actual_dest = dest;
- for (ProxyMap::iterator it = proxies_.begin(); it != proxies_.end(); ++it) {
- ProxyData* proxy_data = &it->second;
-
- // If the proxy's pending bytes are the same as the pending bytes for the
- // combined stream, both are either pre-buffering or in the steady state. In
- // either case the proxy's new pending bytes equal the new pending bytes for
- // the combined stream.
- // Note: use >= instead of ==; it is safer.
- if (proxy_data->pending_bytes >= pending_bytes_)
- proxy_data->pending_bytes = buffers_state.pending_bytes;
-
- // Note: there is no way we can deduce hardware_delay_bytes for the
- // particular proxy stream. Use zero instead.
- uint32 actual_size = proxy_data->audio_source_callback->OnMoreData(
- actual_dest,
- max_size,
- AudioBuffersState(proxy_data->pending_bytes, 0));
- if (actual_size == 0)
- continue;
- double volume = proxy_data->volume;
-
- // Different handling for first and all subsequent streams.
- if (first_stream) {
- if (volume != 1.0) {
- media::AdjustVolume(actual_dest,
- actual_size,
- params_.channels(),
- bytes_per_sample,
- volume);
- }
- if (actual_size < max_size)
- memset(dest + actual_size, 0, max_size - actual_size);
- first_stream = false;
- actual_dest = mixer_data_.get();
- actual_total_size = actual_size;
- } else {
- media::MixStreams(dest,
- actual_dest,
- actual_size,
- bytes_per_sample,
- volume);
- actual_total_size = std::max(actual_size, actual_total_size);
- }
- }
-
- // Now go through all the proxies once again and increase pending_bytes
- // for each proxy. We could not do this earlier because we did not know
- // actual_total_size.
- for (ProxyMap::iterator it = proxies_.begin(); it != proxies_.end(); ++it) {
- it->second.pending_bytes += actual_total_size;
- }
- pending_bytes_ = buffers_state.pending_bytes + actual_total_size;
-
- return actual_total_size;
-}
-
-void AudioOutputMixer::OnError(AudioOutputStream* stream, int code) {
- base::AutoLock lock(lock_);
- for (ProxyMap::iterator it = proxies_.begin(); it != proxies_.end(); ++it) {
- it->second.audio_source_callback->OnError(it->first, code);
- }
-}
-
-void AudioOutputMixer::WaitTillDataReady() {
- base::AutoLock lock(lock_);
- for (ProxyMap::iterator it = proxies_.begin(); it != proxies_.end(); ++it) {
- it->second.audio_source_callback->WaitTillDataReady();
- }
-}
-
-} // namespace media
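// OnMoreData() above renders the first active proxy straight into the
// destination buffer, mixes every remaining proxy on top, and then advances
// each proxy's pending-byte count by the number of bytes actually produced.
// A reduced sketch of that loop over 16-bit samples, using a hypothetical
// Proxy type and leaving out the per-proxy volume scaling, locking, and
// clipping protection of the real code:

#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <cstring>
#include <vector>

struct Proxy {
  std::vector<int16_t> samples;  // data this proxy renders for the callback
  int pending_bytes = 0;
};

// Mixes every proxy into |dest| and returns the number of bytes produced.
int MixInto(std::vector<Proxy>& proxies, int16_t* dest, size_t max_samples) {
  size_t total_samples = 0;
  bool first_stream = true;
  for (Proxy& proxy : proxies) {
    const size_t n = std::min(proxy.samples.size(), max_samples);
    if (first_stream) {
      // The first stream writes straight into the destination; zero-pad the
      // tail so later streams can be mixed in safely.
      if (n != 0)
        std::memcpy(dest, proxy.samples.data(), n * sizeof(int16_t));
      std::fill(dest + n, dest + max_samples, 0);
      first_stream = false;
    } else {
      for (size_t i = 0; i < n; ++i)
        dest[i] = static_cast<int16_t>(dest[i] + proxy.samples[i]);
    }
    total_samples = std::max(total_samples, n);
  }
  const int bytes = static_cast<int>(total_samples * sizeof(int16_t));
  // Every proxy's pending-byte count advances by what was actually produced.
  for (Proxy& proxy : proxies)
    proxy.pending_bytes += bytes;
  return bytes;
}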
diff --git a/src/media/audio/audio_output_mixer.h b/src/media/audio/audio_output_mixer.h
deleted file mode 100644
index 4ddeeef..0000000
--- a/src/media/audio/audio_output_mixer.h
+++ /dev/null
@@ -1,94 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// AudioOutputMixer is a class that implements browser-side audio mixer.
-// AudioOutputMixer implements both AudioOutputDispatcher and
-// AudioSourceCallback interfaces.
-
-#ifndef MEDIA_AUDIO_AUDIO_OUTPUT_MIXER_H_
-#define MEDIA_AUDIO_AUDIO_OUTPUT_MIXER_H_
-
-#include <map>
-
-#include "base/basictypes.h"
-#include "base/memory/ref_counted.h"
-#include "base/memory/scoped_ptr.h"
-#include "base/memory/weak_ptr.h"
-#include "base/synchronization/lock.h"
-#include "base/timer.h"
-#include "media/audio/audio_io.h"
-#include "media/audio/audio_manager.h"
-#include "media/audio/audio_output_dispatcher.h"
-#include "media/audio/audio_parameters.h"
-
-namespace media {
-
-class MEDIA_EXPORT AudioOutputMixer
- : public AudioOutputDispatcher,
- public AudioOutputStream::AudioSourceCallback {
- public:
- AudioOutputMixer(AudioManager* audio_manager,
- const AudioParameters& params,
- const base::TimeDelta& close_delay);
-
- // AudioOutputDispatcher interface.
- virtual bool OpenStream() OVERRIDE;
- virtual bool StartStream(AudioOutputStream::AudioSourceCallback* callback,
- AudioOutputProxy* stream_proxy) OVERRIDE;
- virtual void StopStream(AudioOutputProxy* stream_proxy) OVERRIDE;
- virtual void StreamVolumeSet(AudioOutputProxy* stream_proxy,
- double volume) OVERRIDE;
- virtual void CloseStream(AudioOutputProxy* stream_proxy) OVERRIDE;
- virtual void Shutdown() OVERRIDE;
-
- // AudioSourceCallback interface.
- virtual uint32 OnMoreData(uint8* dest,
- uint32 max_size,
- AudioBuffersState buffers_state) OVERRIDE;
- virtual void OnError(AudioOutputStream* stream, int code) OVERRIDE;
- virtual void WaitTillDataReady() OVERRIDE;
-
- private:
- friend class base::RefCountedThreadSafe<AudioOutputMixer>;
- virtual ~AudioOutputMixer();
-
- // Called by |close_timer_|. Closes physical stream.
- void ClosePhysicalStream();
-
- // |lock_| must be acquired whenever we modify |proxies_| on the audio
- // manager thread or access it on the hardware audio thread. Reading it on
- // the audio manager thread is safe.
- base::Lock lock_;
-
- // List of audio output proxies currently being played.
- // For every proxy we store aux structure containing data necessary for
- // mixing.
- struct ProxyData {
- AudioOutputStream::AudioSourceCallback* audio_source_callback;
- double volume;
- int pending_bytes;
- };
- typedef std::map<AudioOutputProxy*, ProxyData> ProxyMap;
- ProxyMap proxies_;
-
- // Physical stream for this mixer.
- scoped_ptr<AudioOutputStream> physical_stream_;
-
- // Temporary buffer used when mixing. Allocated in the constructor
- // to avoid constant allocation/deallocation in the callback.
- scoped_array<uint8> mixer_data_;
-
- // Used to post delayed tasks to ourselves that we cancel inside Shutdown().
- base::WeakPtrFactory<AudioOutputMixer> weak_this_;
- base::DelayTimer<AudioOutputMixer> close_timer_;
-
- // Size of data in all in-flight buffers.
- int pending_bytes_;
-
- DISALLOW_COPY_AND_ASSIGN(AudioOutputMixer);
-};
-
-} // namespace media
-
-#endif // MEDIA_AUDIO_AUDIO_OUTPUT_MIXER_H_
diff --git a/src/media/audio/audio_output_proxy.cc b/src/media/audio/audio_output_proxy.cc
deleted file mode 100644
index 3609079..0000000
--- a/src/media/audio/audio_output_proxy.cc
+++ /dev/null
@@ -1,89 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "media/audio/audio_output_proxy.h"
-
-#include "base/logging.h"
-#include "base/message_loop.h"
-#include "media/audio/audio_manager.h"
-#include "media/audio/audio_output_dispatcher.h"
-
-namespace media {
-
-AudioOutputProxy::AudioOutputProxy(AudioOutputDispatcher* dispatcher)
- : dispatcher_(dispatcher),
- state_(kCreated),
- volume_(1.0) {
-}
-
-AudioOutputProxy::~AudioOutputProxy() {
- DCHECK(CalledOnValidThread());
- DCHECK(state_ == kCreated || state_ == kClosed) << "State is: " << state_;
-}
-
-bool AudioOutputProxy::Open() {
- DCHECK(CalledOnValidThread());
- DCHECK_EQ(state_, kCreated);
-
- if (!dispatcher_->OpenStream()) {
- state_ = kOpenError;
- return false;
- }
-
- state_ = kOpened;
- return true;
-}
-
-void AudioOutputProxy::Start(AudioSourceCallback* callback) {
- DCHECK(CalledOnValidThread());
- DCHECK_EQ(state_, kOpened);
-
- if (!dispatcher_->StartStream(callback, this)) {
- state_ = kStartError;
- callback->OnError(this, 0);
- return;
- }
- state_ = kPlaying;
-}
-
-void AudioOutputProxy::Stop() {
- DCHECK(CalledOnValidThread());
- if (state_ != kPlaying)
- return;
-
- dispatcher_->StopStream(this);
- state_ = kOpened;
-}
-
-void AudioOutputProxy::SetVolume(double volume) {
- DCHECK(CalledOnValidThread());
- volume_ = volume;
- dispatcher_->StreamVolumeSet(this, volume);
-}
-
-void AudioOutputProxy::GetVolume(double* volume) {
- DCHECK(CalledOnValidThread());
- *volume = volume_;
-}
-
-void AudioOutputProxy::Close() {
- DCHECK(CalledOnValidThread());
- DCHECK(state_ == kCreated || state_ == kOpenError || state_ == kOpened ||
- state_ == kStartError);
-
- // kStartError means OpenStream() succeeded and the stream must be closed
- // before destruction.
- if (state_ != kCreated && state_ != kOpenError)
- dispatcher_->CloseStream(this);
-
- state_ = kClosed;
-
- // Delete the object now, as is done in the Close() implementation of
- // physical stream objects. If we delete the object via DeleteSoon, we
- // unnecessarily complicate the Shutdown procedure of the
- // dispatcher+audio manager.
- delete this;
-}
-
-} // namespace media
diff --git a/src/media/audio/audio_output_proxy.h b/src/media/audio/audio_output_proxy.h
deleted file mode 100644
index 86dab51..0000000
--- a/src/media/audio/audio_output_proxy.h
+++ /dev/null
@@ -1,66 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef MEDIA_AUDIO_AUDIO_OUTPUT_PROXY_H_
-#define MEDIA_AUDIO_AUDIO_OUTPUT_PROXY_H_
-
-#include "base/basictypes.h"
-#include "base/compiler_specific.h"
-#include "base/memory/ref_counted.h"
-#include "base/threading/non_thread_safe.h"
-#include "media/audio/audio_io.h"
-#include "media/audio/audio_parameters.h"
-
-namespace media {
-
-class AudioOutputDispatcher;
-
-// AudioOutputProxy is an audio output stream that uses resources more
-// efficiently than a regular audio output stream: it opens the audio
-// device only while sound is playing, i.e. between Start() and Stop()
-// (there is still one physical stream per audio output proxy in the
-// playing state).
-//
-// AudioOutputProxy uses AudioOutputDispatcher to open and close
-// physical output streams.
-class MEDIA_EXPORT AudioOutputProxy
- : public AudioOutputStream,
- public NON_EXPORTED_BASE(base::NonThreadSafe) {
- public:
- // Caller keeps ownership of |dispatcher|.
- explicit AudioOutputProxy(AudioOutputDispatcher* dispatcher);
-
- // AudioOutputStream interface.
- virtual bool Open() OVERRIDE;
- virtual void Start(AudioSourceCallback* callback) OVERRIDE;
- virtual void Stop() OVERRIDE;
- virtual void SetVolume(double volume) OVERRIDE;
- virtual void GetVolume(double* volume) OVERRIDE;
- virtual void Close() OVERRIDE;
-
- private:
- enum State {
- kCreated,
- kOpened,
- kPlaying,
- kClosed,
- kOpenError,
- kStartError,
- };
-
- virtual ~AudioOutputProxy();
-
- scoped_refptr<AudioOutputDispatcher> dispatcher_;
- State state_;
-
- // We need to save the volume here so that we can restore it if the stream
- // is stopped and then started again.
- double volume_;
-
- DISALLOW_COPY_AND_ASSIGN(AudioOutputProxy);
-};
-
-} // namespace media
-
-#endif // MEDIA_AUDIO_AUDIO_OUTPUT_PROXY_H_
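// The proxy above is essentially a small state machine layered over the
// dispatcher: Open() moves kCreated to kOpened, Start() moves to kPlaying,
// Stop() moves back to kOpened, and Close() is allowed from any state except
// kPlaying and kClosed. A condensed, dispatcher-free sketch of those
// transitions; the asserts stand in for the DCHECKs in audio_output_proxy.cc:

#include <cassert>

class ProxyStates {
 public:
  enum State { kCreated, kOpened, kPlaying, kClosed, kOpenError, kStartError };

  bool Open() {
    assert(state_ == kCreated);
    state_ = kOpened;  // the real code moves to kOpenError if OpenStream() fails
    return true;
  }
  void Start() {
    assert(state_ == kOpened);
    state_ = kPlaying;  // or kStartError if StartStream() fails
  }
  void Stop() {
    if (state_ != kPlaying)
      return;  // Stop() is a no-op unless the proxy is playing
    state_ = kOpened;
  }
  void Close() {
    assert(state_ != kPlaying && state_ != kClosed);
    // kStartError still requires CloseStream(): OpenStream() already succeeded.
    state_ = kClosed;
  }

  State state() const { return state_; }

 private:
  State state_ = kCreated;
};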
diff --git a/src/media/audio/audio_output_proxy_unittest.cc b/src/media/audio/audio_output_proxy_unittest.cc
deleted file mode 100644
index 6c13856..0000000
--- a/src/media/audio/audio_output_proxy_unittest.cc
+++ /dev/null
@@ -1,900 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include <string>
-
-#include "base/command_line.h"
-#include "base/message_loop.h"
-#include "base/message_loop_proxy.h"
-#include "media/audio/audio_output_dispatcher_impl.h"
-#include "media/audio/audio_output_proxy.h"
-#include "media/audio/audio_output_resampler.h"
-#include "media/audio/audio_manager.h"
-#include "media/audio/audio_manager_base.h"
-#include "media/audio/fake_audio_output_stream.h"
-#include "media/base/media_switches.h"
-#include "testing/gmock/include/gmock/gmock.h"
-#include "testing/gtest/include/gtest/gtest.h"
-
-// TODO(dalecurtis): Temporarily disabled while switching pipeline to use float,
-// http://crbug.com/114700
-#if defined(ENABLE_AUDIO_MIXER)
-#include "media/audio/audio_output_mixer.h"
-#endif
-
-using ::testing::_;
-using ::testing::AllOf;
-using ::testing::DoAll;
-using ::testing::Field;
-using ::testing::Mock;
-using ::testing::NotNull;
-using ::testing::Return;
-using ::testing::SetArrayArgument;
-using media::AudioBus;
-using media::AudioBuffersState;
-using media::AudioInputStream;
-using media::AudioManager;
-using media::AudioManagerBase;
-using media::AudioOutputDispatcher;
-using media::AudioOutputProxy;
-using media::AudioOutputStream;
-using media::AudioParameters;
-using media::FakeAudioOutputStream;
-
-namespace {
-
-static const int kTestCloseDelayMs = 100;
-
-// Used in the test where we don't want a stream to be closed unexpectedly.
-static const int kTestBigCloseDelaySeconds = 1000;
-
-// Delay between callbacks to AudioSourceCallback::OnMoreData.
-static const int kOnMoreDataCallbackDelayMs = 10;
-
-// Let Start() run long enough for many OnMoreData callbacks to occur.
-static const int kStartRunTimeMs = kOnMoreDataCallbackDelayMs * 10;
-
-class MockAudioOutputStream : public AudioOutputStream {
- public:
- MockAudioOutputStream(AudioManagerBase* manager,
- const AudioParameters& params)
- : start_called_(false),
- stop_called_(false),
- params_(params),
- fake_output_stream_(
- FakeAudioOutputStream::MakeFakeStream(manager, params_)) {
- }
-
- void Start(AudioSourceCallback* callback) {
- start_called_ = true;
- fake_output_stream_->Start(callback);
- }
-
- void Stop() {
- stop_called_ = true;
- fake_output_stream_->Stop();
- }
-
- ~MockAudioOutputStream() {}
-
- bool start_called() { return start_called_; }
- bool stop_called() { return stop_called_; }
-
- MOCK_METHOD0(Open, bool());
- MOCK_METHOD1(SetVolume, void(double volume));
- MOCK_METHOD1(GetVolume, void(double* volume));
- MOCK_METHOD0(Close, void());
-
- private:
- bool start_called_;
- bool stop_called_;
- AudioParameters params_;
- scoped_ptr<AudioOutputStream> fake_output_stream_;
-};
-
-class MockAudioManager : public AudioManagerBase {
- public:
- MockAudioManager() {}
- virtual ~MockAudioManager() {
- Shutdown();
- }
-
- MOCK_METHOD0(HasAudioOutputDevices, bool());
- MOCK_METHOD0(HasAudioInputDevices, bool());
- MOCK_METHOD0(GetAudioInputDeviceModel, string16());
- MOCK_METHOD1(MakeAudioOutputStream, AudioOutputStream*(
- const AudioParameters& params));
- MOCK_METHOD1(MakeAudioOutputStreamProxy, AudioOutputStream*(
- const AudioParameters& params));
- MOCK_METHOD2(MakeAudioInputStream, AudioInputStream*(
- const AudioParameters& params, const std::string& device_id));
- MOCK_METHOD0(CanShowAudioInputSettings, bool());
- MOCK_METHOD0(ShowAudioInputSettings, void());
- MOCK_METHOD0(GetMessageLoop, scoped_refptr<base::MessageLoopProxy>());
- MOCK_METHOD1(GetAudioInputDeviceNames, void(
- media::AudioDeviceNames* device_name));
- MOCK_METHOD0(IsRecordingInProcess, bool());
-
- MOCK_METHOD1(MakeLinearOutputStream, AudioOutputStream*(
- const AudioParameters& params));
- MOCK_METHOD1(MakeLowLatencyOutputStream, AudioOutputStream*(
- const AudioParameters& params));
- MOCK_METHOD2(MakeLinearInputStream, AudioInputStream*(
- const AudioParameters& params, const std::string& device_id));
- MOCK_METHOD2(MakeLowLatencyInputStream, AudioInputStream*(
- const AudioParameters& params, const std::string& device_id));
-};
-
-class MockAudioSourceCallback : public AudioOutputStream::AudioSourceCallback {
- public:
- int OnMoreData(AudioBus* audio_bus, AudioBuffersState buffers_state) {
- audio_bus->Zero();
- return audio_bus->frames();
- }
- int OnMoreIOData(AudioBus* source, AudioBus* dest,
- AudioBuffersState buffers_state) {
- return OnMoreData(dest, buffers_state);
- }
- MOCK_METHOD2(OnError, void(AudioOutputStream* stream, int code));
-};
-
-} // namespace
-
-namespace media {
-
-class AudioOutputProxyTest : public testing::Test {
- protected:
- virtual void SetUp() {
- EXPECT_CALL(manager_, GetMessageLoop())
- .WillRepeatedly(Return(message_loop_.message_loop_proxy()));
- InitDispatcher(base::TimeDelta::FromMilliseconds(kTestCloseDelayMs));
- }
-
- virtual void TearDown() {
- // All paused proxies should have been closed at this point.
- EXPECT_EQ(0u, dispatcher_impl_->paused_proxies_);
-
- // This is necessary to free all proxy objects that have been
- // closed by the test.
- message_loop_.RunUntilIdle();
- }
-
- virtual void InitDispatcher(base::TimeDelta close_delay) {
- // Use a low sample rate and large buffer size when testing; otherwise the
- // FakeAudioOutputStream will keep the message loop busy indefinitely; i.e.,
- // RunUntilIdle() will never terminate.
- params_ = AudioParameters(AudioParameters::AUDIO_PCM_LINEAR,
- CHANNEL_LAYOUT_STEREO, 8000, 16, 2048);
- dispatcher_impl_ = new AudioOutputDispatcherImpl(&manager(),
- params_,
- close_delay);
-#if defined(ENABLE_AUDIO_MIXER)
- mixer_ = new AudioOutputMixer(&manager(), params_, close_delay);
-#endif
-
- // Necessary to know how long the dispatcher will wait before posting
- // StopStreamTask.
- pause_delay_ = dispatcher_impl_->pause_delay_;
- }
-
- virtual void OnStart() {}
-
- MockAudioManager& manager() {
- return manager_;
- }
-
- // Wait for the close timer to fire.
- void WaitForCloseTimer(const int timer_delay_ms) {
- message_loop_.RunUntilIdle(); // OpenTask() may reset the timer.
- base::PlatformThread::Sleep(
- base::TimeDelta::FromMilliseconds(timer_delay_ms) * 2);
- message_loop_.RunUntilIdle();
- }
-
- // Methods that do actual tests.
- void OpenAndClose(AudioOutputDispatcher* dispatcher) {
- MockAudioOutputStream stream(&manager_, params_);
-
- EXPECT_CALL(manager(), MakeAudioOutputStream(_))
- .WillOnce(Return(&stream));
- EXPECT_CALL(stream, Open())
- .WillOnce(Return(true));
- EXPECT_CALL(stream, Close())
- .Times(1);
-
- AudioOutputProxy* proxy = new AudioOutputProxy(dispatcher);
- EXPECT_TRUE(proxy->Open());
- proxy->Close();
- WaitForCloseTimer(kTestCloseDelayMs);
- }
-
- // Creates a stream, then calls Start() and Stop().
- void StartAndStop(AudioOutputDispatcher* dispatcher) {
- MockAudioOutputStream stream(&manager_, params_);
-
- EXPECT_CALL(manager(), MakeAudioOutputStream(_))
- .WillOnce(Return(&stream));
- EXPECT_CALL(stream, Open())
- .WillOnce(Return(true));
- EXPECT_CALL(stream, SetVolume(_))
- .Times(1);
- EXPECT_CALL(stream, Close())
- .Times(1);
-
- AudioOutputProxy* proxy = new AudioOutputProxy(dispatcher);
- EXPECT_TRUE(proxy->Open());
-
- proxy->Start(&callback_);
- OnStart();
- proxy->Stop();
-
- proxy->Close();
- WaitForCloseTimer(kTestCloseDelayMs);
- EXPECT_TRUE(stream.stop_called());
- EXPECT_TRUE(stream.start_called());
- }
-
- // Verify that the stream is closed after Stop is called.
- void CloseAfterStop(AudioOutputDispatcher* dispatcher) {
- MockAudioOutputStream stream(&manager_, params_);
-
- EXPECT_CALL(manager(), MakeAudioOutputStream(_))
- .WillOnce(Return(&stream));
- EXPECT_CALL(stream, Open())
- .WillOnce(Return(true));
- EXPECT_CALL(stream, SetVolume(_))
- .Times(1);
- EXPECT_CALL(stream, Close())
- .Times(1);
-
- AudioOutputProxy* proxy = new AudioOutputProxy(dispatcher);
- EXPECT_TRUE(proxy->Open());
-
- proxy->Start(&callback_);
- OnStart();
- proxy->Stop();
-
- // Wait for StopStream() to post StopStreamTask().
- base::PlatformThread::Sleep(pause_delay_ * 2);
- WaitForCloseTimer(kTestCloseDelayMs);
-
- // Verify expectation before calling Close().
- Mock::VerifyAndClear(&stream);
-
- proxy->Close();
- EXPECT_TRUE(stream.stop_called());
- EXPECT_TRUE(stream.start_called());
- }
-
- // Create two streams, but don't start them. Only one device must be open.
- void TwoStreams(AudioOutputDispatcher* dispatcher) {
- MockAudioOutputStream stream(&manager_, params_);
-
- EXPECT_CALL(manager(), MakeAudioOutputStream(_))
- .WillOnce(Return(&stream));
- EXPECT_CALL(stream, Open())
- .WillOnce(Return(true));
- EXPECT_CALL(stream, Close())
- .Times(1);
-
- AudioOutputProxy* proxy1 = new AudioOutputProxy(dispatcher);
- AudioOutputProxy* proxy2 = new AudioOutputProxy(dispatcher);
- EXPECT_TRUE(proxy1->Open());
- EXPECT_TRUE(proxy2->Open());
- proxy1->Close();
- proxy2->Close();
- WaitForCloseTimer(kTestCloseDelayMs);
- EXPECT_FALSE(stream.stop_called());
- EXPECT_FALSE(stream.start_called());
- }
-
- // Open() method failed.
- void OpenFailed(AudioOutputDispatcher* dispatcher) {
- MockAudioOutputStream stream(&manager_, params_);
-
- EXPECT_CALL(manager(), MakeAudioOutputStream(_))
- .WillOnce(Return(&stream));
- EXPECT_CALL(stream, Open())
- .WillOnce(Return(false));
- EXPECT_CALL(stream, Close())
- .Times(1);
-
- AudioOutputProxy* proxy = new AudioOutputProxy(dispatcher);
- EXPECT_FALSE(proxy->Open());
- proxy->Close();
- WaitForCloseTimer(kTestCloseDelayMs);
- EXPECT_FALSE(stream.stop_called());
- EXPECT_FALSE(stream.start_called());
- }
-
- void CreateAndWait(AudioOutputDispatcher* dispatcher) {
- MockAudioOutputStream stream(&manager_, params_);
-
- EXPECT_CALL(manager(), MakeAudioOutputStream(_))
- .WillOnce(Return(&stream));
- EXPECT_CALL(stream, Open())
- .WillOnce(Return(true));
- EXPECT_CALL(stream, Close())
- .Times(1);
-
- AudioOutputProxy* proxy = new AudioOutputProxy(dispatcher);
- EXPECT_TRUE(proxy->Open());
-
- // Simulate a delay.
- base::PlatformThread::Sleep(
- base::TimeDelta::FromMilliseconds(kTestCloseDelayMs) * 2);
- message_loop_.RunUntilIdle();
-
- // Verify expectation before calling Close().
- Mock::VerifyAndClear(&stream);
-
- proxy->Close();
- EXPECT_FALSE(stream.stop_called());
- EXPECT_FALSE(stream.start_called());
- }
-
- void TwoStreams_OnePlaying(AudioOutputDispatcher* dispatcher) {
- MockAudioOutputStream stream1(&manager_, params_);
- MockAudioOutputStream stream2(&manager_, params_);
-
- EXPECT_CALL(manager(), MakeAudioOutputStream(_))
- .WillOnce(Return(&stream1))
- .WillOnce(Return(&stream2));
-
- EXPECT_CALL(stream1, Open())
- .WillOnce(Return(true));
- EXPECT_CALL(stream1, SetVolume(_))
- .Times(1);
- EXPECT_CALL(stream1, Close())
- .Times(1);
-
- EXPECT_CALL(stream2, Open())
- .WillOnce(Return(true));
- EXPECT_CALL(stream2, Close())
- .Times(1);
-
- AudioOutputProxy* proxy1 = new AudioOutputProxy(dispatcher);
- AudioOutputProxy* proxy2 = new AudioOutputProxy(dispatcher);
- EXPECT_TRUE(proxy1->Open());
- EXPECT_TRUE(proxy2->Open());
-
- proxy1->Start(&callback_);
- message_loop_.RunUntilIdle();
- OnStart();
- proxy1->Stop();
-
- proxy1->Close();
- proxy2->Close();
- EXPECT_TRUE(stream1.stop_called());
- EXPECT_TRUE(stream1.start_called());
- EXPECT_FALSE(stream2.stop_called());
- EXPECT_FALSE(stream2.start_called());
- }
-
- void TwoStreams_BothPlaying(AudioOutputDispatcher* dispatcher) {
- MockAudioOutputStream stream1(&manager_, params_);
- MockAudioOutputStream stream2(&manager_, params_);
-
- EXPECT_CALL(manager(), MakeAudioOutputStream(_))
- .WillOnce(Return(&stream1))
- .WillOnce(Return(&stream2));
-
- EXPECT_CALL(stream1, Open())
- .WillOnce(Return(true));
- EXPECT_CALL(stream1, SetVolume(_))
- .Times(1);
- EXPECT_CALL(stream1, Close())
- .Times(1);
-
- EXPECT_CALL(stream2, Open())
- .WillOnce(Return(true));
- EXPECT_CALL(stream2, SetVolume(_))
- .Times(1);
- EXPECT_CALL(stream2, Close())
- .Times(1);
-
- AudioOutputProxy* proxy1 = new AudioOutputProxy(dispatcher);
- AudioOutputProxy* proxy2 = new AudioOutputProxy(dispatcher);
- EXPECT_TRUE(proxy1->Open());
- EXPECT_TRUE(proxy2->Open());
-
- proxy1->Start(&callback_);
- proxy2->Start(&callback_);
- OnStart();
- proxy1->Stop();
- proxy2->Stop();
-
- proxy1->Close();
- proxy2->Close();
- EXPECT_TRUE(stream1.stop_called());
- EXPECT_TRUE(stream1.start_called());
- EXPECT_TRUE(stream2.stop_called());
- EXPECT_TRUE(stream2.start_called());
- }
-
- void StartFailed(AudioOutputDispatcher* dispatcher) {
- MockAudioOutputStream stream(&manager_, params_);
-
- EXPECT_CALL(manager(), MakeAudioOutputStream(_))
- .WillOnce(Return(&stream));
- EXPECT_CALL(stream, Open())
- .WillOnce(Return(true));
- EXPECT_CALL(stream, Close())
- .Times(1);
-
- AudioOutputProxy* proxy = new AudioOutputProxy(dispatcher_impl_);
- EXPECT_TRUE(proxy->Open());
-
- // Simulate a delay.
- base::PlatformThread::Sleep(
- base::TimeDelta::FromMilliseconds(kTestCloseDelayMs) * 2);
- message_loop_.RunUntilIdle();
-
- // Verify expectation before calling Close().
- Mock::VerifyAndClear(&stream);
-
- // |stream| is closed at this point. Start() should reopen it again.
- EXPECT_CALL(manager(), MakeAudioOutputStream(_))
- .WillOnce(Return(reinterpret_cast<AudioOutputStream*>(NULL)));
-
- EXPECT_CALL(callback_, OnError(_, _))
- .Times(1);
-
- proxy->Start(&callback_);
-
- Mock::VerifyAndClear(&callback_);
-
- proxy->Close();
- }
-
- MessageLoop message_loop_;
- scoped_refptr<AudioOutputDispatcherImpl> dispatcher_impl_;
-#if defined(ENABLE_AUDIO_MIXER)
- scoped_refptr<AudioOutputMixer> mixer_;
-#endif
- base::TimeDelta pause_delay_;
- MockAudioManager manager_;
- MockAudioSourceCallback callback_;
- AudioParameters params_;
-};
-
-class AudioOutputResamplerTest : public AudioOutputProxyTest {
- public:
- virtual void TearDown() {
- AudioOutputProxyTest::TearDown();
- }
-
- virtual void InitDispatcher(base::TimeDelta close_delay) {
- AudioOutputProxyTest::InitDispatcher(close_delay);
- // Use a low sample rate and large buffer size when testing; otherwise the
- // FakeAudioOutputStream will keep the message loop busy indefinitely; i.e.,
- // RunUntilIdle() will never terminate.
- resampler_params_ = AudioParameters(
- AudioParameters::AUDIO_PCM_LOW_LATENCY, CHANNEL_LAYOUT_STEREO,
- 16000, 16, 1024);
- resampler_ = new AudioOutputResampler(
- &manager(), params_, resampler_params_, close_delay);
- }
-
- virtual void OnStart() {
- // Let Start() run for a bit.
- message_loop_.RunUntilIdle();
- base::PlatformThread::Sleep(
- base::TimeDelta::FromMilliseconds(kStartRunTimeMs));
- }
-
- protected:
- AudioParameters resampler_params_;
- scoped_refptr<AudioOutputResampler> resampler_;
-};
-
-TEST_F(AudioOutputProxyTest, CreateAndClose) {
- AudioOutputProxy* proxy = new AudioOutputProxy(dispatcher_impl_);
- proxy->Close();
-}
-
-#if defined(ENABLE_AUDIO_MIXER)
-TEST_F(AudioOutputProxyTest, CreateAndClose_Mixer) {
- AudioOutputProxy* proxy = new AudioOutputProxy(mixer_);
- proxy->Close();
-}
-#endif
-
-TEST_F(AudioOutputResamplerTest, CreateAndClose) {
- AudioOutputProxy* proxy = new AudioOutputProxy(resampler_);
- proxy->Close();
-}
-
-TEST_F(AudioOutputProxyTest, OpenAndClose) {
- OpenAndClose(dispatcher_impl_);
-}
-
-#if defined(ENABLE_AUDIO_MIXER)
-TEST_F(AudioOutputProxyTest, OpenAndClose_Mixer) {
- OpenAndClose(mixer_);
-}
-#endif
-
-TEST_F(AudioOutputResamplerTest, OpenAndClose) {
- OpenAndClose(resampler_);
-}
-
-// Create a stream, and verify that it is closed after kTestCloseDelayMs
-// if it doesn't start playing.
-TEST_F(AudioOutputProxyTest, CreateAndWait) {
- CreateAndWait(dispatcher_impl_);
-}
-
-// Create a stream, and verify that it is closed after kTestCloseDelayMs
-// if it doesn't start playing.
-TEST_F(AudioOutputResamplerTest, CreateAndWait) {
- CreateAndWait(resampler_);
-}
-
-TEST_F(AudioOutputProxyTest, StartAndStop) {
- StartAndStop(dispatcher_impl_);
-}
-
-#if defined(ENABLE_AUDIO_MIXER)
-TEST_F(AudioOutputProxyTest, StartAndStop_Mixer) {
- StartAndStop(mixer_);
-}
-#endif
-
-TEST_F(AudioOutputResamplerTest, StartAndStop) {
- StartAndStop(resampler_);
-}
-
-TEST_F(AudioOutputProxyTest, CloseAfterStop) {
- CloseAfterStop(dispatcher_impl_);
-}
-
-#if defined(ENABLE_AUDIO_MIXER)
-TEST_F(AudioOutputProxyTest, CloseAfterStop_Mixer) {
- CloseAfterStop(mixer_);
-}
-#endif
-
-TEST_F(AudioOutputResamplerTest, CloseAfterStop) {
- CloseAfterStop(resampler_);
-}
-
-TEST_F(AudioOutputProxyTest, TwoStreams) {
- TwoStreams(dispatcher_impl_);
-}
-
-#if defined(ENABLE_AUDIO_MIXER)
-TEST_F(AudioOutputProxyTest, TwoStreams_Mixer) {
- TwoStreams(mixer_);
-}
-#endif
-
-TEST_F(AudioOutputResamplerTest, TwoStreams) {
- TwoStreams(resampler_);
-}
-
-// Two streams: verify that the second stream is allocated when the first
-// starts playing.
-TEST_F(AudioOutputProxyTest, TwoStreams_OnePlaying) {
- InitDispatcher(base::TimeDelta::FromSeconds(kTestBigCloseDelaySeconds));
- TwoStreams_OnePlaying(dispatcher_impl_);
-}
-
-#if defined(ENABLE_AUDIO_MIXER)
-// Two streams: verify that only one device will be created.
-TEST_F(AudioOutputProxyTest, TwoStreams_OnePlaying_Mixer) {
- MockAudioOutputStream stream(&manager_, params_);
-
- InitDispatcher(base::TimeDelta::FromMilliseconds(kTestCloseDelayMs));
-
- EXPECT_CALL(manager(), MakeAudioOutputStream(_))
- .WillOnce(Return(&stream));
-
- EXPECT_CALL(stream, Open())
- .WillOnce(Return(true));
- EXPECT_CALL(stream, Start(_))
- .Times(1);
- EXPECT_CALL(stream, SetVolume(_))
- .Times(1);
- EXPECT_CALL(stream, Stop())
- .Times(1);
- EXPECT_CALL(stream, Close())
- .Times(1);
-
- AudioOutputProxy* proxy1 = new AudioOutputProxy(mixer_);
- AudioOutputProxy* proxy2 = new AudioOutputProxy(mixer_);
- EXPECT_TRUE(proxy1->Open());
- EXPECT_TRUE(proxy2->Open());
-
- proxy1->Start(&callback_);
- proxy1->Stop();
-
- proxy1->Close();
- proxy2->Close();
- WaitForCloseTimer(kTestCloseDelayMs);
-}
-#endif
-
-TEST_F(AudioOutputResamplerTest, TwoStreams_OnePlaying) {
- InitDispatcher(base::TimeDelta::FromSeconds(kTestBigCloseDelaySeconds));
- TwoStreams_OnePlaying(resampler_);
-}
-
-// Two streams, both are playing. Dispatcher should not open a third stream.
-TEST_F(AudioOutputProxyTest, TwoStreams_BothPlaying) {
- InitDispatcher(base::TimeDelta::FromSeconds(kTestBigCloseDelaySeconds));
- TwoStreams_BothPlaying(dispatcher_impl_);
-}
-
-#if defined(ENABLE_AUDIO_MIXER)
-// Two streams, both are playing. Still have to use a single device.
-// Also verifies that every proxy stream gets its own pending_bytes.
-TEST_F(AudioOutputProxyTest, TwoStreams_BothPlaying_Mixer) {
- MockAudioOutputStream stream(&manager_, params_);
-
- InitDispatcher(base::TimeDelta::FromMilliseconds(kTestCloseDelayMs));
-
- EXPECT_CALL(manager(), MakeAudioOutputStream(_))
- .WillOnce(Return(&stream));
-
- EXPECT_CALL(stream, Open())
- .WillOnce(Return(true));
- EXPECT_CALL(stream, Start(_))
- .Times(1);
- EXPECT_CALL(stream, SetVolume(_))
- .Times(1);
- EXPECT_CALL(stream, Stop())
- .Times(1);
- EXPECT_CALL(stream, Close())
- .Times(1);
-
- AudioOutputProxy* proxy1 = new AudioOutputProxy(mixer_);
- AudioOutputProxy* proxy2 = new AudioOutputProxy(mixer_);
- EXPECT_TRUE(proxy1->Open());
- EXPECT_TRUE(proxy2->Open());
-
- proxy1->Start(&callback_);
-
- // Mute the proxy. Resulting stream should still have correct length.
- proxy1->SetVolume(0.0);
-
- uint8 zeroes[4] = {0, 0, 0, 0};
- uint8 buf1[4] = {0};
- EXPECT_CALL(callback_,
- OnMoreData(NotNull(), 4,
- AllOf(Field(&AudioBuffersState::pending_bytes, 0),
- Field(&AudioBuffersState::hardware_delay_bytes, 0))))
- .WillOnce(DoAll(SetArrayArgument<0>(zeroes, zeroes + sizeof(zeroes)),
- Return(4)));
- mixer_->OnMoreData(buf1, sizeof(buf1), AudioBuffersState(0, 0));
- proxy2->Start(&callback_);
- uint8 buf2[4] = {0};
- EXPECT_CALL(callback_,
- OnMoreData(NotNull(), 4,
- AllOf(Field(&AudioBuffersState::pending_bytes, 4),
- Field(&AudioBuffersState::hardware_delay_bytes, 0))))
- .WillOnce(DoAll(SetArrayArgument<0>(zeroes, zeroes + sizeof(zeroes)),
- Return(4)));
- EXPECT_CALL(callback_,
- OnMoreData(NotNull(), 4,
- AllOf(Field(&AudioBuffersState::pending_bytes, 0),
- Field(&AudioBuffersState::hardware_delay_bytes, 0))))
- .WillOnce(DoAll(SetArrayArgument<0>(zeroes, zeroes + sizeof(zeroes)),
- Return(4)));
- mixer_->OnMoreData(buf2, sizeof(buf2), AudioBuffersState(4, 0));
- proxy1->Stop();
- proxy2->Stop();
-
- proxy1->Close();
- proxy2->Close();
- WaitForCloseTimer(kTestCloseDelayMs);
-}
-#endif
-
-TEST_F(AudioOutputResamplerTest, TwoStreams_BothPlaying) {
- InitDispatcher(base::TimeDelta::FromSeconds(kTestBigCloseDelaySeconds));
- TwoStreams_BothPlaying(resampler_);
-}
-
-TEST_F(AudioOutputProxyTest, OpenFailed) {
- OpenFailed(dispatcher_impl_);
-}
-
-#if defined(ENABLE_AUDIO_MIXER)
-TEST_F(AudioOutputProxyTest, OpenFailed_Mixer) {
- OpenFailed(mixer_);
-}
-#endif
-
-TEST_F(AudioOutputResamplerTest, OpenFailed) {
- CommandLine::ForCurrentProcess()->AppendSwitch(
- switches::kDisableAudioFallback);
- OpenFailed(resampler_);
-}
-
-// Start() method failed.
-TEST_F(AudioOutputProxyTest, StartFailed) {
- StartFailed(dispatcher_impl_);
-}
-
-#if defined(ENABLE_AUDIO_MIXER)
-// Start() method failed.
-TEST_F(AudioOutputProxyTest, StartFailed_Mixer) {
- MockAudioOutputStream stream(&manager_, params_);
-
- EXPECT_CALL(manager(), MakeAudioOutputStream(_))
- .WillOnce(Return(&stream));
- EXPECT_CALL(stream, Open())
- .WillOnce(Return(true));
- EXPECT_CALL(stream, Close())
- .Times(1);
- EXPECT_CALL(stream, Start(_))
- .Times(1);
- EXPECT_CALL(stream, SetVolume(_))
- .Times(1);
- EXPECT_CALL(stream, Stop())
- .Times(1);
-
- AudioOutputProxy* proxy1 = new AudioOutputProxy(mixer_);
- AudioOutputProxy* proxy2 = new AudioOutputProxy(mixer_);
- EXPECT_TRUE(proxy1->Open());
- EXPECT_TRUE(proxy2->Open());
- proxy1->Start(&callback_);
- proxy1->Stop();
- proxy1->Close();
- WaitForCloseTimer(kTestCloseDelayMs);
-
- // Verify expectations before continuing.
- Mock::VerifyAndClear(&stream);
-
- // |stream| is closed at this point. Start() should reopen it again.
- EXPECT_CALL(manager(), MakeAudioOutputStream(_))
- .WillOnce(Return(reinterpret_cast<AudioOutputStream*>(NULL)));
-
- EXPECT_CALL(callback_, OnError(_, _))
- .Times(1);
-
- proxy2->Start(&callback_);
-
- Mock::VerifyAndClear(&callback_);
-
- proxy2->Close();
- WaitForCloseTimer(kTestCloseDelayMs);
-}
-#endif
-
-TEST_F(AudioOutputResamplerTest, StartFailed) {
- StartFailed(resampler_);
-}
-
-// Simulate AudioOutputStream::Create() failure with a low latency stream and
-// ensure AudioOutputResampler falls back to the high latency path.
-TEST_F(AudioOutputResamplerTest, LowLatencyCreateFailedFallback) {
- MockAudioOutputStream stream(&manager_, params_);
- EXPECT_CALL(manager(), MakeAudioOutputStream(_))
- .Times(2)
- .WillOnce(Return(static_cast<AudioOutputStream*>(NULL)))
- .WillRepeatedly(Return(&stream));
- EXPECT_CALL(stream, Open())
- .WillOnce(Return(true));
- EXPECT_CALL(stream, Close())
- .Times(1);
-
- AudioOutputProxy* proxy = new AudioOutputProxy(resampler_);
- EXPECT_TRUE(proxy->Open());
- proxy->Close();
- WaitForCloseTimer(kTestCloseDelayMs);
-}
-
-// Simulate AudioOutputStream::Open() failure with a low latency stream and
-// ensure AudioOutputResampler falls back to the high latency path.
-TEST_F(AudioOutputResamplerTest, LowLatencyOpenFailedFallback) {
- MockAudioOutputStream failed_stream(&manager_, params_);
- MockAudioOutputStream okay_stream(&manager_, params_);
- EXPECT_CALL(manager(), MakeAudioOutputStream(_))
- .Times(2)
- .WillOnce(Return(&failed_stream))
- .WillRepeatedly(Return(&okay_stream));
- EXPECT_CALL(failed_stream, Open())
- .WillOnce(Return(false));
- EXPECT_CALL(failed_stream, Close())
- .Times(1);
- EXPECT_CALL(okay_stream, Open())
- .WillOnce(Return(true));
- EXPECT_CALL(okay_stream, Close())
- .Times(1);
-
- AudioOutputProxy* proxy = new AudioOutputProxy(resampler_);
- EXPECT_TRUE(proxy->Open());
- proxy->Close();
- WaitForCloseTimer(kTestCloseDelayMs);
-}
-
-// Simulate failures to open both the low latency and the fallback high latency
-// stream and ensure AudioOutputResampler terminates normally.
-TEST_F(AudioOutputResamplerTest, LowLatencyFallbackFailed) {
- EXPECT_CALL(manager(), MakeAudioOutputStream(_))
- .Times(2)
- .WillRepeatedly(Return(static_cast<AudioOutputStream*>(NULL)));
-
- AudioOutputProxy* proxy = new AudioOutputProxy(resampler_);
- EXPECT_FALSE(proxy->Open());
- proxy->Close();
- WaitForCloseTimer(kTestCloseDelayMs);
-}
-
-// Simulate an eventual OpenStream() failure; i.e. successful OpenStream() calls
-// eventually followed by one which fails; root cause of http://crbug.com/150619
-TEST_F(AudioOutputResamplerTest, LowLatencyOpenEventuallyFails) {
- MockAudioOutputStream stream1(&manager_, params_);
- MockAudioOutputStream stream2(&manager_, params_);
- MockAudioOutputStream stream3(&manager_, params_);
-
- // Setup the mock such that all three streams are successfully created.
- EXPECT_CALL(manager(), MakeAudioOutputStream(_))
- .WillOnce(Return(&stream1))
- .WillOnce(Return(&stream2))
- .WillOnce(Return(&stream3))
- .WillRepeatedly(Return(static_cast<AudioOutputStream*>(NULL)));
-
- // Stream1 should be able to successfully open and start.
- EXPECT_CALL(stream1, Open())
- .WillOnce(Return(true));
- EXPECT_CALL(stream1, Close())
- .Times(1);
- EXPECT_CALL(stream1, SetVolume(_))
- .Times(1);
-
- // Stream2 should also be able to successfully open and start.
- EXPECT_CALL(stream2, Open())
- .WillOnce(Return(true));
- EXPECT_CALL(stream2, Close())
- .Times(1);
- EXPECT_CALL(stream2, SetVolume(_))
- .Times(1);
-
- // Stream3 should fail on Open() (yet still be closed since
- // MakeAudioOutputStream returned a valid AudioOutputStream object).
- EXPECT_CALL(stream3, Open())
- .WillOnce(Return(false));
- EXPECT_CALL(stream3, Close())
- .Times(1);
-
- // Open and start the first proxy and stream.
- AudioOutputProxy* proxy1 = new AudioOutputProxy(resampler_);
- EXPECT_TRUE(proxy1->Open());
- proxy1->Start(&callback_);
- OnStart();
-
- // Open and start the second proxy and stream.
- AudioOutputProxy* proxy2 = new AudioOutputProxy(resampler_);
- EXPECT_TRUE(proxy2->Open());
- proxy2->Start(&callback_);
- OnStart();
-
- // Attempt to open the third stream which should fail.
- AudioOutputProxy* proxy3 = new AudioOutputProxy(resampler_);
- EXPECT_FALSE(proxy3->Open());
-
- // Perform the required Stop()/Close() shutdown dance for each proxy. Under
- // the hood each proxy should correctly call CloseStream() whether OpenStream()
- // succeeded or not.
- proxy3->Stop();
- proxy3->Close();
- proxy2->Stop();
- proxy2->Close();
- proxy1->Stop();
- proxy1->Close();
-
- // Wait for all of the messages to fly and then verify stream behavior.
- WaitForCloseTimer(kTestCloseDelayMs);
- EXPECT_TRUE(stream1.stop_called());
- EXPECT_TRUE(stream1.start_called());
- EXPECT_TRUE(stream2.stop_called());
- EXPECT_TRUE(stream2.start_called());
- EXPECT_FALSE(stream3.stop_called());
- EXPECT_FALSE(stream3.start_called());
-}
-
-} // namespace media
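// The three fallback tests above pin down the retry behaviour expected of
// AudioOutputResampler: if the low-latency stream cannot be created or opened,
// try once more with high-latency parameters before giving up. A compact
// sketch of that policy, using hypothetical factory callbacks in place of the
// real AudioManager; ownership handling and parameter rewriting are simplified:

#include <functional>
#include <memory>

struct Stream {
  bool openable = true;             // controls whether Open() succeeds
  bool Open() { return openable; }
  void Close() {}
};

using Factory = std::function<std::unique_ptr<Stream>()>;

// Returns the first stream that is both created and opened, or nullptr when
// both configurations fail (the LowLatencyFallbackFailed case).
std::unique_ptr<Stream> OpenWithFallback(const Factory& low_latency,
                                         const Factory& high_latency) {
  for (const Factory* factory : {&low_latency, &high_latency}) {
    std::unique_ptr<Stream> stream = (*factory)();
    if (!stream)
      continue;  // creation failed; try the next configuration
    if (stream->Open())
      return stream;
    stream->Close();  // a created-but-unopenable stream must still be closed
  }
  return nullptr;
}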
diff --git a/src/media/audio/audio_output_resampler.cc b/src/media/audio/audio_output_resampler.cc
deleted file mode 100644
index 4734e40..0000000
--- a/src/media/audio/audio_output_resampler.cc
+++ /dev/null
@@ -1,397 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "media/audio/audio_output_resampler.h"
-
-#include "base/bind.h"
-#include "base/bind_helpers.h"
-#include "base/command_line.h"
-#include "base/compiler_specific.h"
-#include "base/message_loop.h"
-#include "base/metrics/histogram.h"
-#include "base/time.h"
-#include "build/build_config.h"
-#include "media/audio/audio_io.h"
-#include "media/audio/audio_output_dispatcher_impl.h"
-#include "media/audio/audio_output_proxy.h"
-#include "media/audio/audio_util.h"
-#include "media/audio/sample_rates.h"
-#include "media/base/audio_converter.h"
-#include "media/base/limits.h"
-#include "media/base/media_switches.h"
-
-#if defined(OS_WIN)
-#include "media/audio/win/core_audio_util_win.h"
-#endif
-
-namespace media {
-
-class OnMoreDataConverter
- : public AudioOutputStream::AudioSourceCallback,
- public AudioConverter::InputCallback {
- public:
- OnMoreDataConverter(const AudioParameters& input_params,
- const AudioParameters& output_params);
- virtual ~OnMoreDataConverter();
-
- // AudioSourceCallback interface.
- virtual int OnMoreData(AudioBus* dest,
- AudioBuffersState buffers_state) OVERRIDE;
- virtual int OnMoreIOData(AudioBus* source,
- AudioBus* dest,
- AudioBuffersState buffers_state) OVERRIDE;
- virtual void OnError(AudioOutputStream* stream, int code) OVERRIDE;
- virtual void WaitTillDataReady() OVERRIDE;
-
- // Sets |source_callback_|. If this is not a new object, then Stop() must be
- // called before Start().
- void Start(AudioOutputStream::AudioSourceCallback* callback);
-
- // Clears |source_callback_| and flushes the resampler.
- void Stop();
-
- private:
- // AudioConverter::InputCallback implementation.
- virtual double ProvideInput(AudioBus* audio_bus,
- base::TimeDelta buffer_delay) OVERRIDE;
-
- // Ratio of input bytes to output bytes used to correct playback delay with
- // regard to buffering and resampling.
- double io_ratio_;
-
- // Source callback and associated lock.
- base::Lock source_lock_;
- AudioOutputStream::AudioSourceCallback* source_callback_;
-
- // |source| passed to OnMoreIOData() which should be passed downstream.
- AudioBus* source_bus_;
-
- // Last AudioBuffersState object received via OnMoreData(), used to correct
- // playback delay by ProvideInput() and passed on to |source_callback_|.
- AudioBuffersState current_buffers_state_;
-
- const int input_bytes_per_second_;
-
- // Handles resampling, buffering, and channel mixing between input and output
- // parameters.
- AudioConverter audio_converter_;
-
-  // If we're using WaveOut on Windows, we always have to call
-  // WaitTillDataReady() before calling |source_callback_|.
- bool waveout_wait_hack_;
-
- DISALLOW_COPY_AND_ASSIGN(OnMoreDataConverter);
-};
-
-// Record UMA statistics for hardware output configuration.
-static void RecordStats(const AudioParameters& output_params) {
- UMA_HISTOGRAM_ENUMERATION(
- "Media.HardwareAudioBitsPerChannel", output_params.bits_per_sample(),
- limits::kMaxBitsPerSample);
- UMA_HISTOGRAM_ENUMERATION(
- "Media.HardwareAudioChannelLayout", output_params.channel_layout(),
- CHANNEL_LAYOUT_MAX);
- UMA_HISTOGRAM_ENUMERATION(
- "Media.HardwareAudioChannelCount", output_params.channels(),
- limits::kMaxChannels);
-
- AudioSampleRate asr = media::AsAudioSampleRate(output_params.sample_rate());
- if (asr != kUnexpectedAudioSampleRate) {
- UMA_HISTOGRAM_ENUMERATION(
- "Media.HardwareAudioSamplesPerSecond", asr, kUnexpectedAudioSampleRate);
- } else {
- UMA_HISTOGRAM_COUNTS(
- "Media.HardwareAudioSamplesPerSecondUnexpected",
- output_params.sample_rate());
- }
-}
-
-// Record UMA statistics for hardware output configuration after fallback.
-static void RecordFallbackStats(const AudioParameters& output_params) {
- UMA_HISTOGRAM_BOOLEAN("Media.FallbackToHighLatencyAudioPath", true);
- UMA_HISTOGRAM_ENUMERATION(
- "Media.FallbackHardwareAudioBitsPerChannel",
- output_params.bits_per_sample(), limits::kMaxBitsPerSample);
- UMA_HISTOGRAM_ENUMERATION(
- "Media.FallbackHardwareAudioChannelLayout",
- output_params.channel_layout(), CHANNEL_LAYOUT_MAX);
- UMA_HISTOGRAM_ENUMERATION(
- "Media.FallbackHardwareAudioChannelCount",
- output_params.channels(), limits::kMaxChannels);
-
- AudioSampleRate asr = media::AsAudioSampleRate(output_params.sample_rate());
- if (asr != kUnexpectedAudioSampleRate) {
- UMA_HISTOGRAM_ENUMERATION(
- "Media.FallbackHardwareAudioSamplesPerSecond",
- asr, kUnexpectedAudioSampleRate);
- } else {
- UMA_HISTOGRAM_COUNTS(
- "Media.FallbackHardwareAudioSamplesPerSecondUnexpected",
- output_params.sample_rate());
- }
-}
-
-// Converts the low-latency |output_params| into output parameters appropriate
-// for the high-latency path in error situations.
-static AudioParameters SetupFallbackParams(
- const AudioParameters& input_params, const AudioParameters& output_params) {
- // Choose AudioParameters appropriate for opening the device in high latency
- // mode. |kMinLowLatencyFrameSize| is arbitrarily based on Pepper Flash's
- // MAXIMUM frame size for low latency.
- static const int kMinLowLatencyFrameSize = 2048;
- int frames_per_buffer = std::min(
- std::max(input_params.frames_per_buffer(), kMinLowLatencyFrameSize),
- static_cast<int>(
- GetHighLatencyOutputBufferSize(input_params.sample_rate())));
-
- return AudioParameters(
- AudioParameters::AUDIO_PCM_LINEAR, input_params.channel_layout(),
- input_params.sample_rate(), input_params.bits_per_sample(),
- frames_per_buffer);
-}
-
-AudioOutputResampler::AudioOutputResampler(AudioManager* audio_manager,
- const AudioParameters& input_params,
- const AudioParameters& output_params,
- const base::TimeDelta& close_delay)
- : AudioOutputDispatcher(audio_manager, input_params),
- close_delay_(close_delay),
- output_params_(output_params),
- streams_opened_(false) {
- DCHECK(input_params.IsValid());
- DCHECK(output_params.IsValid());
- DCHECK_EQ(output_params_.format(), AudioParameters::AUDIO_PCM_LOW_LATENCY);
-
- // Record UMA statistics for the hardware configuration.
- RecordStats(output_params);
-
- Initialize();
-}
-
-AudioOutputResampler::~AudioOutputResampler() {
- DCHECK(callbacks_.empty());
-}
-
-void AudioOutputResampler::Initialize() {
- DCHECK(!streams_opened_);
- DCHECK(callbacks_.empty());
- dispatcher_ = new AudioOutputDispatcherImpl(
- audio_manager_, output_params_, close_delay_);
-}
-
-bool AudioOutputResampler::OpenStream() {
- DCHECK_EQ(MessageLoop::current(), message_loop_);
-
- if (dispatcher_->OpenStream()) {
-    // Only record the UMA statistic if we didn't fall back during construction
-    // and only for the first stream we open.
- if (!streams_opened_ &&
- output_params_.format() == AudioParameters::AUDIO_PCM_LOW_LATENCY) {
- UMA_HISTOGRAM_BOOLEAN("Media.FallbackToHighLatencyAudioPath", false);
- }
- streams_opened_ = true;
- return true;
- }
-
- // If we've already tried to open the stream in high latency mode or we've
- // successfully opened a stream previously, there's nothing more to be done.
- if (output_params_.format() == AudioParameters::AUDIO_PCM_LINEAR ||
- streams_opened_ || !callbacks_.empty()) {
- return false;
- }
-
- DCHECK_EQ(output_params_.format(), AudioParameters::AUDIO_PCM_LOW_LATENCY);
-
- if (CommandLine::ForCurrentProcess()->HasSwitch(
- switches::kDisableAudioFallback)) {
- LOG(ERROR) << "Open failed and automatic fallback to high latency audio "
- << "path is disabled, aborting.";
- return false;
- }
-
- DLOG(ERROR) << "Unable to open audio device in low latency mode. Falling "
- << "back to high latency audio output.";
-
- // Record UMA statistics about the hardware which triggered the failure so
- // we can debug and triage later.
- RecordFallbackStats(output_params_);
- output_params_ = SetupFallbackParams(params_, output_params_);
- Initialize();
-
-  // Retry; if this fails, there's nothing left to do but report the error back.
- return dispatcher_->OpenStream();
-}
-
-bool AudioOutputResampler::StartStream(
- AudioOutputStream::AudioSourceCallback* callback,
- AudioOutputProxy* stream_proxy) {
- DCHECK_EQ(MessageLoop::current(), message_loop_);
-
- OnMoreDataConverter* resampler_callback = NULL;
- CallbackMap::iterator it = callbacks_.find(stream_proxy);
- if (it == callbacks_.end()) {
- resampler_callback = new OnMoreDataConverter(params_, output_params_);
- callbacks_[stream_proxy] = resampler_callback;
- } else {
- resampler_callback = it->second;
- }
- resampler_callback->Start(callback);
- return dispatcher_->StartStream(resampler_callback, stream_proxy);
-}
-
-void AudioOutputResampler::StreamVolumeSet(AudioOutputProxy* stream_proxy,
- double volume) {
- DCHECK_EQ(MessageLoop::current(), message_loop_);
- dispatcher_->StreamVolumeSet(stream_proxy, volume);
-}
-
-void AudioOutputResampler::StopStream(AudioOutputProxy* stream_proxy) {
- DCHECK_EQ(MessageLoop::current(), message_loop_);
- dispatcher_->StopStream(stream_proxy);
-
-  // Now that StopStream() has completed, the underlying physical stream should
-  // be stopped and no longer calling OnMoreData(), making it safe to Stop() the
-  // OnMoreDataConverter.
- CallbackMap::iterator it = callbacks_.find(stream_proxy);
- if (it != callbacks_.end())
- it->second->Stop();
-}
-
-void AudioOutputResampler::CloseStream(AudioOutputProxy* stream_proxy) {
- DCHECK_EQ(MessageLoop::current(), message_loop_);
- dispatcher_->CloseStream(stream_proxy);
-
- // We assume that StopStream() is always called prior to CloseStream(), so
- // that it is safe to delete the OnMoreDataConverter here.
- CallbackMap::iterator it = callbacks_.find(stream_proxy);
- if (it != callbacks_.end()) {
- delete it->second;
- callbacks_.erase(it);
- }
-}
-
-void AudioOutputResampler::Shutdown() {
- DCHECK_EQ(MessageLoop::current(), message_loop_);
-
- // No AudioOutputProxy objects should hold a reference to us when we get
- // to this stage.
- DCHECK(HasOneRef()) << "Only the AudioManager should hold a reference";
-
- dispatcher_->Shutdown();
- DCHECK(callbacks_.empty());
-}
-
-OnMoreDataConverter::OnMoreDataConverter(const AudioParameters& input_params,
- const AudioParameters& output_params)
- : source_callback_(NULL),
- source_bus_(NULL),
- input_bytes_per_second_(input_params.GetBytesPerSecond()),
- audio_converter_(input_params, output_params, false),
- waveout_wait_hack_(false) {
- io_ratio_ =
- static_cast<double>(input_params.GetBytesPerSecond()) /
- output_params.GetBytesPerSecond();
-
- // TODO(dalecurtis): We should require all render side clients to use a
- // buffer size that's a multiple of the hardware buffer size scaled by the
- // request_sample_rate / hw_sample_rate. Doing so ensures each hardware
- // request for audio data results in only a single render side callback and
- // would allow us to remove this hack. See http://crbug.com/162207.
-#if defined(OS_WIN)
- waveout_wait_hack_ =
- output_params.format() == AudioParameters::AUDIO_PCM_LINEAR ||
- !CoreAudioUtil::IsSupported();
-#endif
-}
-
-OnMoreDataConverter::~OnMoreDataConverter() {}
-
-void OnMoreDataConverter::Start(
- AudioOutputStream::AudioSourceCallback* callback) {
- base::AutoLock auto_lock(source_lock_);
- DCHECK(!source_callback_);
- source_callback_ = callback;
-
- // While AudioConverter can handle multiple inputs, we're using it only with
- // a single input currently. Eventually this may be the basis for a browser
- // side mixer.
- audio_converter_.AddInput(this);
-}
-
-void OnMoreDataConverter::Stop() {
- base::AutoLock auto_lock(source_lock_);
- source_callback_ = NULL;
- audio_converter_.RemoveInput(this);
-}
-
-int OnMoreDataConverter::OnMoreData(AudioBus* dest,
- AudioBuffersState buffers_state) {
- return OnMoreIOData(NULL, dest, buffers_state);
-}
-
-int OnMoreDataConverter::OnMoreIOData(AudioBus* source,
- AudioBus* dest,
- AudioBuffersState buffers_state) {
- base::AutoLock auto_lock(source_lock_);
- // While we waited for |source_lock_| the callback might have been cleared.
- if (!source_callback_) {
- dest->Zero();
- return dest->frames();
- }
-
- source_bus_ = source;
- current_buffers_state_ = buffers_state;
- audio_converter_.Convert(dest);
-
-  // Always return the full number of frames requested; ProvideInput() will pad
-  // with silence if it wasn't able to acquire enough data.
- return dest->frames();
-}
-
-double OnMoreDataConverter::ProvideInput(AudioBus* dest,
- base::TimeDelta buffer_delay) {
- source_lock_.AssertAcquired();
-
- // Adjust playback delay to include |buffer_delay|.
-  // TODO(dalecurtis): Stop passing bytes around; it doesn't make sense since
-  // AudioBus is just float data. Use TimeDelta instead.
- AudioBuffersState new_buffers_state;
- new_buffers_state.pending_bytes =
- io_ratio_ * (current_buffers_state_.total_bytes() +
- buffer_delay.InSecondsF() * input_bytes_per_second_);
-
- if (waveout_wait_hack_)
- source_callback_->WaitTillDataReady();
-
- // Retrieve data from the original callback.
- int frames = source_callback_->OnMoreIOData(
- source_bus_, dest, new_buffers_state);
-
- // |source_bus_| should only be provided once.
- // TODO(dalecurtis, crogers): This is not a complete fix. If ProvideInput()
- // is called multiple times, we need to do something more clever here.
- source_bus_ = NULL;
-
-  // Zero any unfilled frames if anything was filled; otherwise we'll just
-  // return a volume of zero and let AudioConverter drop the output.
- if (frames > 0 && frames < dest->frames())
- dest->ZeroFramesPartial(frames, dest->frames() - frames);
-
- // TODO(dalecurtis): Return the correct volume here.
- return frames > 0 ? 1 : 0;
-}
-
-void OnMoreDataConverter::OnError(AudioOutputStream* stream, int code) {
- base::AutoLock auto_lock(source_lock_);
- if (source_callback_)
- source_callback_->OnError(stream, code);
-}
-
-void OnMoreDataConverter::WaitTillDataReady() {
- base::AutoLock auto_lock(source_lock_);
- if (source_callback_)
- source_callback_->WaitTillDataReady();
-}
-
-} // namespace media
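For reference, the delay correction in OnMoreDataConverter::ProvideInput() above amounts to folding the converter's own buffering delay into the buffers state and rescaling the total by the input/output byte ratio before handing it to the original callback. A minimal standalone sketch of that arithmetic (not part of the deleted file; the stream configuration and delay values are hypothetical):

#include <cstdio>

int main() {
  // Hypothetical configuration: 44.1 kHz stereo 16-bit input resampled to
  // 48 kHz stereo 16-bit output.
  const double input_bytes_per_second = 44100.0 * 2 * 2;
  const double output_bytes_per_second = 48000.0 * 2 * 2;
  const double io_ratio = input_bytes_per_second / output_bytes_per_second;

  // Hypothetical delay already reported by the output stream, plus the
  // buffering delay added by the converter itself.
  const double output_total_bytes = 8192.0;
  const double buffer_delay_seconds = 0.004;

  // Same expression as ProvideInput() above: add the converter delay and
  // scale everything by |io_ratio|.
  const double pending_bytes =
      io_ratio * (output_total_bytes +
                  buffer_delay_seconds * input_bytes_per_second);
  std::printf("pending bytes reported to the source: %.0f\n", pending_bytes);
  return 0;
}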
diff --git a/src/media/audio/audio_output_resampler.h b/src/media/audio/audio_output_resampler.h
deleted file mode 100644
index 057cf34..0000000
--- a/src/media/audio/audio_output_resampler.h
+++ /dev/null
@@ -1,85 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef MEDIA_AUDIO_AUDIO_OUTPUT_RESAMPLER_H_
-#define MEDIA_AUDIO_AUDIO_OUTPUT_RESAMPLER_H_
-
-#include <map>
-
-#include "base/basictypes.h"
-#include "base/memory/ref_counted.h"
-#include "base/time.h"
-#include "media/audio/audio_io.h"
-#include "media/audio/audio_manager.h"
-#include "media/audio/audio_output_dispatcher.h"
-#include "media/audio/audio_parameters.h"
-
-namespace media {
-
-class OnMoreDataConverter;
-
-// AudioOutputResampler is a browser-side resampling and buffering solution
-// which ensures audio data is always output at given parameters. See the
-// AudioConverter class for details on the conversion process.
-//
-// AOR works by intercepting the AudioSourceCallback provided to StartStream()
-// and redirecting it through an AudioConverter instance. AudioBuffersState is
-// adjusted for buffer delay caused by the conversion process.
-//
-// AOR will automatically fall back from AUDIO_PCM_LOW_LATENCY to
-// AUDIO_PCM_LINEAR if the output device fails to open at the requested output
-// parameters.
-//
-// TODO(dalecurtis): Ideally the low latency path will be as reliable as the
-// high latency path once we have channel mixing and support querying for the
-// hardware's configured bit depth. Monitor the UMA stats for fallback and
-// remove fallback support once it's stable. http://crbug.com/148418
-class MEDIA_EXPORT AudioOutputResampler : public AudioOutputDispatcher {
- public:
- AudioOutputResampler(AudioManager* audio_manager,
- const AudioParameters& input_params,
- const AudioParameters& output_params,
- const base::TimeDelta& close_delay);
-
- // AudioOutputDispatcher interface.
- virtual bool OpenStream() OVERRIDE;
- virtual bool StartStream(AudioOutputStream::AudioSourceCallback* callback,
- AudioOutputProxy* stream_proxy) OVERRIDE;
- virtual void StopStream(AudioOutputProxy* stream_proxy) OVERRIDE;
- virtual void StreamVolumeSet(AudioOutputProxy* stream_proxy,
- double volume) OVERRIDE;
- virtual void CloseStream(AudioOutputProxy* stream_proxy) OVERRIDE;
- virtual void Shutdown() OVERRIDE;
-
- private:
- friend class base::RefCountedThreadSafe<AudioOutputResampler>;
- virtual ~AudioOutputResampler();
-
- // Used to initialize and reinitialize |dispatcher_|.
- void Initialize();
-
-  // Dispatcher that all AudioOutputDispatcher calls are proxied to.
- scoped_refptr<AudioOutputDispatcher> dispatcher_;
-
- // Map of outstanding OnMoreDataConverter objects. A new object is created
- // on every StartStream() call and destroyed on CloseStream().
- typedef std::map<AudioOutputProxy*, OnMoreDataConverter*> CallbackMap;
- CallbackMap callbacks_;
-
- // Used by AudioOutputDispatcherImpl; kept so we can reinitialize on the fly.
- base::TimeDelta close_delay_;
-
- // AudioParameters used to setup the output stream.
- AudioParameters output_params_;
-
-  // Whether any streams have been opened through |dispatcher_|; if so, we
-  // can't fall back on future OpenStream() failures.
- bool streams_opened_;
-
- DISALLOW_COPY_AND_ASSIGN(AudioOutputResampler);
-};
-
-} // namespace media
-
-#endif // MEDIA_AUDIO_AUDIO_OUTPUT_RESAMPLER_H_
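The fallback behavior described in the class comment above clamps the caller's requested buffer size before reopening the device in AUDIO_PCM_LINEAR mode (see SetupFallbackParams() in the deleted .cc file). A small sketch of just that clamping, with hypothetical inputs:

#include <algorithm>
#include <cstdio>

int main() {
  // Mirrors the clamping in SetupFallbackParams(): never go below Pepper
  // Flash's maximum low-latency frame size, never above the high-latency
  // buffer size for the sample rate.
  const int kMinLowLatencyFrameSize = 2048;
  const int input_frames_per_buffer = 480;    // Hypothetical request.
  const int high_latency_buffer_size = 8192;  // E.g. for a 48 kHz stream.

  const int frames_per_buffer =
      std::min(std::max(input_frames_per_buffer, kMinLowLatencyFrameSize),
               high_latency_buffer_size);
  std::printf("fallback frames_per_buffer: %d\n", frames_per_buffer);  // 2048
  return 0;
}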
diff --git a/src/media/audio/audio_parameters.cc b/src/media/audio/audio_parameters.cc
deleted file mode 100644
index 0d9263f..0000000
--- a/src/media/audio/audio_parameters.cc
+++ /dev/null
@@ -1,69 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "media/audio/audio_parameters.h"
-
-#include "media/base/limits.h"
-
-namespace media {
-
-AudioParameters::AudioParameters()
- : format_(AUDIO_PCM_LINEAR),
- channel_layout_(CHANNEL_LAYOUT_NONE),
- sample_rate_(0),
- bits_per_sample_(0),
- frames_per_buffer_(0),
- channels_(0) {
-}
-
-AudioParameters::AudioParameters(Format format, ChannelLayout channel_layout,
- int sample_rate, int bits_per_sample,
- int frames_per_buffer)
- : format_(format),
- channel_layout_(channel_layout),
- sample_rate_(sample_rate),
- bits_per_sample_(bits_per_sample),
- frames_per_buffer_(frames_per_buffer),
- channels_(ChannelLayoutToChannelCount(channel_layout)) {
-}
-
-void AudioParameters::Reset(Format format, ChannelLayout channel_layout,
- int sample_rate, int bits_per_sample,
- int frames_per_buffer) {
- format_ = format;
- channel_layout_ = channel_layout;
- sample_rate_ = sample_rate;
- bits_per_sample_ = bits_per_sample;
- frames_per_buffer_ = frames_per_buffer;
- channels_ = ChannelLayoutToChannelCount(channel_layout);
-}
-
-bool AudioParameters::IsValid() const {
- return (format_ >= AUDIO_PCM_LINEAR) &&
- (format_ < AUDIO_LAST_FORMAT) &&
- (channels_ > 0) &&
- (channels_ <= media::limits::kMaxChannels) &&
- (channel_layout_ > CHANNEL_LAYOUT_UNSUPPORTED) &&
- (channel_layout_ < CHANNEL_LAYOUT_MAX) &&
- (sample_rate_ >= media::limits::kMinSampleRate) &&
- (sample_rate_ <= media::limits::kMaxSampleRate) &&
- (bits_per_sample_ > 0) &&
- (bits_per_sample_ <= media::limits::kMaxBitsPerSample) &&
- (frames_per_buffer_ > 0) &&
- (frames_per_buffer_ <= media::limits::kMaxSamplesPerPacket);
-}
-
-int AudioParameters::GetBytesPerBuffer() const {
- return frames_per_buffer_ * GetBytesPerFrame();
-}
-
-int AudioParameters::GetBytesPerSecond() const {
- return sample_rate_ * GetBytesPerFrame();
-}
-
-int AudioParameters::GetBytesPerFrame() const {
- return channels_ * bits_per_sample_ / 8;
-}
-
-} // namespace media
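As a quick sanity check of the byte-count helpers above, a standalone sketch with arbitrary example parameters (not taken from the change):

#include <cstdio>

int main() {
  // 48 kHz, stereo, 16-bit, 480 frames per buffer (arbitrary values).
  const int sample_rate = 48000;
  const int channels = 2;
  const int bits_per_sample = 16;
  const int frames_per_buffer = 480;

  // Mirrors GetBytesPerFrame(), GetBytesPerSecond() and GetBytesPerBuffer().
  const int bytes_per_frame = channels * bits_per_sample / 8;        // 4
  const int bytes_per_second = sample_rate * bytes_per_frame;        // 192000
  const int bytes_per_buffer = frames_per_buffer * bytes_per_frame;  // 1920

  std::printf("%d %d %d\n", bytes_per_frame, bytes_per_second,
              bytes_per_buffer);
  return 0;
}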
diff --git a/src/media/audio/audio_parameters.h b/src/media/audio/audio_parameters.h
deleted file mode 100644
index 0225468..0000000
--- a/src/media/audio/audio_parameters.h
+++ /dev/null
@@ -1,99 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef MEDIA_AUDIO_AUDIO_PARAMETERS_H_
-#define MEDIA_AUDIO_AUDIO_PARAMETERS_H_
-
-#include "base/basictypes.h"
-#include "media/base/channel_layout.h"
-#include "media/base/media_export.h"
-
-namespace media {
-
-struct MEDIA_EXPORT AudioInputBufferParameters {
- double volume;
- uint32 size;
-};
-
-// Use a struct-in-struct approach to ensure that we can calculate the required
-// size as sizeof(AudioInputBufferParameters) + #(bytes in audio buffer) without
-// using packing.
-struct MEDIA_EXPORT AudioInputBuffer {
- AudioInputBufferParameters params;
- int8 audio[1];
-};
-
-class MEDIA_EXPORT AudioParameters {
- public:
- enum Format {
- AUDIO_PCM_LINEAR = 0, // PCM is 'raw' amplitude samples.
- AUDIO_PCM_LOW_LATENCY, // Linear PCM, low latency requested.
- AUDIO_FAKE, // Creates a fake AudioOutputStream object.
- AUDIO_VIRTUAL, // Creates a VirtualAudioInputStream object.
- // Applies to input streams only.
- AUDIO_LAST_FORMAT // Only used for validation of format.
- };
-
- enum {
- // Telephone quality sample rate, mostly for speech-only audio.
- kTelephoneSampleRate = 8000,
- // CD sampling rate is 44.1 KHz or conveniently 2x2x3x3x5x5x7x7.
- kAudioCDSampleRate = 44100,
- };
-
- AudioParameters();
- AudioParameters(Format format, ChannelLayout channel_layout,
- int sample_rate, int bits_per_sample,
- int frames_per_buffer);
- void Reset(Format format, ChannelLayout channel_layout,
- int sample_rate, int bits_per_sample,
- int frames_per_buffer);
-
- // Checks that all values are in the expected range. All limits are specified
- // in media::Limits.
- bool IsValid() const;
-
- // Returns size of audio buffer in bytes.
- int GetBytesPerBuffer() const;
-
- // Returns the number of bytes representing one second of audio.
- int GetBytesPerSecond() const;
-
- // Returns the number of bytes representing a frame of audio.
- int GetBytesPerFrame() const;
-
- Format format() const { return format_; }
- ChannelLayout channel_layout() const { return channel_layout_; }
- int sample_rate() const { return sample_rate_; }
- int bits_per_sample() const { return bits_per_sample_; }
- int frames_per_buffer() const { return frames_per_buffer_; }
- int channels() const { return channels_; }
-
- private:
- Format format_; // Format of the stream.
- ChannelLayout channel_layout_; // Order of surround sound channels.
- int sample_rate_; // Sampling frequency/rate.
- int bits_per_sample_; // Number of bits per sample.
- int frames_per_buffer_; // Number of frames in a buffer.
-
- int channels_; // Number of channels. Value set based on
- // |channel_layout|.
-};
-
-// Comparison is useful when AudioParameters is used with std structures.
-inline bool operator<(const AudioParameters& a, const AudioParameters& b) {
- if (a.format() != b.format())
- return a.format() < b.format();
- if (a.channels() != b.channels())
- return a.channels() < b.channels();
- if (a.sample_rate() != b.sample_rate())
- return a.sample_rate() < b.sample_rate();
- if (a.bits_per_sample() != b.bits_per_sample())
- return a.bits_per_sample() < b.bits_per_sample();
- return a.frames_per_buffer() < b.frames_per_buffer();
-}
-
-} // namespace media
-
-#endif // MEDIA_AUDIO_AUDIO_PARAMETERS_H_
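The operator< above exists mainly so AudioParameters can be used as a key in ordered containers. A minimal usage sketch against the header removed above (the cache itself is hypothetical):

#include <map>

#include "media/audio/audio_parameters.h"

using namespace media;  // For AudioParameters and the channel-layout constants.

int main() {
  // Hypothetical cache keyed by stream parameters; relies on operator<.
  std::map<AudioParameters, int> stream_counts;
  AudioParameters params(AudioParameters::AUDIO_PCM_LOW_LATENCY,
                         CHANNEL_LAYOUT_STEREO, 48000, 16, 480);
  ++stream_counts[params];
  return stream_counts.count(params) == 1 ? 0 : 1;
}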
diff --git a/src/media/audio/audio_parameters_unittest.cc b/src/media/audio/audio_parameters_unittest.cc
deleted file mode 100644
index fd42e14..0000000
--- a/src/media/audio/audio_parameters_unittest.cc
+++ /dev/null
@@ -1,168 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/basictypes.h"
-#include "base/string_number_conversions.h"
-#include "media/audio/audio_parameters.h"
-#include "testing/gtest/include/gtest/gtest.h"
-
-namespace media {
-
-TEST(AudioParameters, Constructor_Default) {
- AudioParameters::Format expected_format = AudioParameters::AUDIO_PCM_LINEAR;
- int expected_bits = 0;
- int expected_channels = 0;
- ChannelLayout expected_channel_layout = CHANNEL_LAYOUT_NONE;
- int expected_rate = 0;
- int expected_samples = 0;
-
- AudioParameters params;
-
- EXPECT_EQ(expected_format, params.format());
- EXPECT_EQ(expected_bits, params.bits_per_sample());
- EXPECT_EQ(expected_channels, params.channels());
- EXPECT_EQ(expected_channel_layout, params.channel_layout());
- EXPECT_EQ(expected_rate, params.sample_rate());
- EXPECT_EQ(expected_samples, params.frames_per_buffer());
-}
-
-TEST(AudioParameters, Constructor_ParameterValues) {
- AudioParameters::Format expected_format =
- AudioParameters::AUDIO_PCM_LOW_LATENCY;
- int expected_bits = 16;
- int expected_channels = 6;
- ChannelLayout expected_channel_layout = CHANNEL_LAYOUT_5_1;
- int expected_rate = 44100;
- int expected_samples = 880;
-
- AudioParameters params(expected_format, expected_channel_layout,
- expected_rate, expected_bits, expected_samples);
-
- EXPECT_EQ(expected_format, params.format());
- EXPECT_EQ(expected_bits, params.bits_per_sample());
- EXPECT_EQ(expected_channels, params.channels());
- EXPECT_EQ(expected_channel_layout, params.channel_layout());
- EXPECT_EQ(expected_rate, params.sample_rate());
- EXPECT_EQ(expected_samples, params.frames_per_buffer());
-}
-
-TEST(AudioParameters, GetBytesPerBuffer) {
- EXPECT_EQ(100, AudioParameters(AudioParameters::AUDIO_PCM_LINEAR,
- CHANNEL_LAYOUT_MONO, 1000, 8, 100)
- .GetBytesPerBuffer());
- EXPECT_EQ(200, AudioParameters(AudioParameters::AUDIO_PCM_LINEAR,
- CHANNEL_LAYOUT_MONO, 1000, 16, 100)
- .GetBytesPerBuffer());
- EXPECT_EQ(200, AudioParameters(AudioParameters::AUDIO_PCM_LINEAR,
- CHANNEL_LAYOUT_STEREO, 1000, 8, 100)
- .GetBytesPerBuffer());
- EXPECT_EQ(200, AudioParameters(AudioParameters::AUDIO_PCM_LINEAR,
- CHANNEL_LAYOUT_MONO, 1000, 8, 200)
- .GetBytesPerBuffer());
- EXPECT_EQ(800, AudioParameters(AudioParameters::AUDIO_PCM_LINEAR,
- CHANNEL_LAYOUT_STEREO, 1000, 16, 200)
- .GetBytesPerBuffer());
-}
-
-TEST(AudioParameters, GetBytesPerSecond) {
- EXPECT_EQ(0, AudioParameters(AudioParameters::AUDIO_PCM_LINEAR,
- CHANNEL_LAYOUT_NONE, 0, 0, 0)
- .GetBytesPerSecond());
- EXPECT_EQ(0, AudioParameters(AudioParameters::AUDIO_PCM_LINEAR,
- CHANNEL_LAYOUT_STEREO, 0, 0, 0)
- .GetBytesPerSecond());
- EXPECT_EQ(0, AudioParameters(AudioParameters::AUDIO_PCM_LINEAR,
- CHANNEL_LAYOUT_NONE, 100, 0, 0)
- .GetBytesPerSecond());
- EXPECT_EQ(0, AudioParameters(AudioParameters::AUDIO_PCM_LINEAR,
- CHANNEL_LAYOUT_NONE, 0, 8, 0)
- .GetBytesPerSecond());
- EXPECT_EQ(200, AudioParameters(AudioParameters::AUDIO_PCM_LINEAR,
- CHANNEL_LAYOUT_STEREO, 100, 8, 0)
- .GetBytesPerSecond());
-}
-
-TEST(AudioParameters, Compare) {
- AudioParameters values[] = {
- AudioParameters(AudioParameters::AUDIO_PCM_LINEAR, CHANNEL_LAYOUT_MONO,
- 1000, 8, 100),
- AudioParameters(AudioParameters::AUDIO_PCM_LINEAR, CHANNEL_LAYOUT_MONO,
- 1000, 8, 200),
- AudioParameters(AudioParameters::AUDIO_PCM_LINEAR, CHANNEL_LAYOUT_MONO,
- 1000, 16, 100),
- AudioParameters(AudioParameters::AUDIO_PCM_LINEAR, CHANNEL_LAYOUT_MONO,
- 1000, 16, 200),
- AudioParameters(AudioParameters::AUDIO_PCM_LINEAR, CHANNEL_LAYOUT_MONO,
- 2000, 8, 100),
- AudioParameters(AudioParameters::AUDIO_PCM_LINEAR, CHANNEL_LAYOUT_MONO,
- 2000, 8, 200),
- AudioParameters(AudioParameters::AUDIO_PCM_LINEAR, CHANNEL_LAYOUT_MONO,
- 2000, 16, 100),
- AudioParameters(AudioParameters::AUDIO_PCM_LINEAR, CHANNEL_LAYOUT_MONO,
- 2000, 16, 200),
-
- AudioParameters(AudioParameters::AUDIO_PCM_LINEAR, CHANNEL_LAYOUT_STEREO,
- 1000, 8, 100),
- AudioParameters(AudioParameters::AUDIO_PCM_LINEAR, CHANNEL_LAYOUT_STEREO,
- 1000, 8, 200),
- AudioParameters(AudioParameters::AUDIO_PCM_LINEAR, CHANNEL_LAYOUT_STEREO,
- 1000, 16, 100),
- AudioParameters(AudioParameters::AUDIO_PCM_LINEAR, CHANNEL_LAYOUT_STEREO,
- 1000, 16, 200),
- AudioParameters(AudioParameters::AUDIO_PCM_LINEAR, CHANNEL_LAYOUT_STEREO,
- 2000, 8, 100),
- AudioParameters(AudioParameters::AUDIO_PCM_LINEAR, CHANNEL_LAYOUT_STEREO,
- 2000, 8, 200),
- AudioParameters(AudioParameters::AUDIO_PCM_LINEAR, CHANNEL_LAYOUT_STEREO,
- 2000, 16, 100),
- AudioParameters(AudioParameters::AUDIO_PCM_LINEAR, CHANNEL_LAYOUT_STEREO,
- 2000, 16, 200),
-
- AudioParameters(AudioParameters::AUDIO_PCM_LOW_LATENCY, CHANNEL_LAYOUT_MONO,
- 1000, 8, 100),
- AudioParameters(AudioParameters::AUDIO_PCM_LOW_LATENCY, CHANNEL_LAYOUT_MONO,
- 1000, 8, 200),
- AudioParameters(AudioParameters::AUDIO_PCM_LOW_LATENCY, CHANNEL_LAYOUT_MONO,
- 1000, 16, 100),
- AudioParameters(AudioParameters::AUDIO_PCM_LOW_LATENCY, CHANNEL_LAYOUT_MONO,
- 1000, 16, 200),
- AudioParameters(AudioParameters::AUDIO_PCM_LOW_LATENCY, CHANNEL_LAYOUT_MONO,
- 2000, 8, 100),
- AudioParameters(AudioParameters::AUDIO_PCM_LOW_LATENCY, CHANNEL_LAYOUT_MONO,
- 2000, 8, 200),
- AudioParameters(AudioParameters::AUDIO_PCM_LOW_LATENCY, CHANNEL_LAYOUT_MONO,
- 2000, 16, 100),
- AudioParameters(AudioParameters::AUDIO_PCM_LOW_LATENCY, CHANNEL_LAYOUT_MONO,
- 2000, 16, 200),
-
- AudioParameters(AudioParameters::AUDIO_PCM_LOW_LATENCY,
- CHANNEL_LAYOUT_STEREO, 1000, 8, 100),
- AudioParameters(AudioParameters::AUDIO_PCM_LOW_LATENCY,
- CHANNEL_LAYOUT_STEREO, 1000, 8, 200),
- AudioParameters(AudioParameters::AUDIO_PCM_LOW_LATENCY,
- CHANNEL_LAYOUT_STEREO, 1000, 16, 100),
- AudioParameters(AudioParameters::AUDIO_PCM_LOW_LATENCY,
- CHANNEL_LAYOUT_STEREO, 1000, 16, 200),
- AudioParameters(AudioParameters::AUDIO_PCM_LOW_LATENCY,
- CHANNEL_LAYOUT_STEREO, 2000, 8, 100),
- AudioParameters(AudioParameters::AUDIO_PCM_LOW_LATENCY,
- CHANNEL_LAYOUT_STEREO, 2000, 8, 200),
- AudioParameters(AudioParameters::AUDIO_PCM_LOW_LATENCY,
- CHANNEL_LAYOUT_STEREO, 2000, 16, 100),
- AudioParameters(AudioParameters::AUDIO_PCM_LOW_LATENCY,
- CHANNEL_LAYOUT_STEREO, 2000, 16, 200),
- };
-
- for (size_t i = 0; i < arraysize(values); ++i) {
- for (size_t j = 0; j < arraysize(values); ++j) {
- SCOPED_TRACE("i=" + base::IntToString(i) + " j=" + base::IntToString(j));
- EXPECT_EQ(i < j, values[i] < values[j]);
- }
-
- // Verify that a value is never less than itself.
- EXPECT_FALSE(values[i] < values[i]);
- }
-}
-
-} // namespace media
diff --git a/src/media/audio/audio_util.cc b/src/media/audio/audio_util.cc
deleted file mode 100644
index e91610f..0000000
--- a/src/media/audio/audio_util.cc
+++ /dev/null
@@ -1,381 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// Software volume adjustment of samples; allows each audio stream its own
-// volume without impacting the master volume for Chrome and other applications.
-
-// Implemented as templates to allow 8, 16 and 32 bit implementations.
-// 8 bit is unsigned and biased by 128.
-
-// TODO(vrk): This file has been running pretty wild and free, and it's likely
-// that a lot of the functions can be simplified and made more elegant. Revisit
-// after other audio cleanup is done. (crbug.com/120319)
-
-#include "media/audio/audio_util.h"
-
-#include <algorithm>
-#include <limits>
-
-#include "base/basictypes.h"
-#include "base/command_line.h"
-#include "base/logging.h"
-#include "base/string_number_conversions.h"
-#include "base/time.h"
-#include "media/audio/audio_parameters.h"
-#include "media/base/audio_bus.h"
-#include "media/base/media_switches.h"
-
-#if defined(OS_MACOSX)
-#include "media/audio/mac/audio_low_latency_input_mac.h"
-#include "media/audio/mac/audio_low_latency_output_mac.h"
-#elif defined(OS_WIN)
-#include "base/win/windows_version.h"
-#include "media/audio/audio_manager_base.h"
-#include "media/audio/win/audio_low_latency_input_win.h"
-#include "media/audio/win/audio_low_latency_output_win.h"
-#include "media/audio/win/core_audio_util_win.h"
-#include "media/base/limits.h"
-#endif
-
-namespace media {
-
-// Returns user buffer size as specified on the command line or 0 if no buffer
-// size has been specified.
-static int GetUserBufferSize() {
- const CommandLine* cmd_line = CommandLine::ForCurrentProcess();
- int buffer_size = 0;
- std::string buffer_size_str(cmd_line->GetSwitchValueASCII(
- switches::kAudioBufferSize));
- if (base::StringToInt(buffer_size_str, &buffer_size) && buffer_size > 0) {
- return buffer_size;
- }
-
- return 0;
-}
-
-// TODO(fbarchard): Convert to intrinsics for better efficiency.
-template<class Fixed>
-static int ScaleChannel(int channel, int volume) {
- return static_cast<int>((static_cast<Fixed>(channel) * volume) >> 16);
-}
-
-template<class Format, class Fixed, int bias>
-static void AdjustVolume(Format* buf_out,
- int sample_count,
- int fixed_volume) {
- for (int i = 0; i < sample_count; ++i) {
- buf_out[i] = static_cast<Format>(ScaleChannel<Fixed>(buf_out[i] - bias,
- fixed_volume) + bias);
- }
-}
-
-template<class Fixed, int min_value, int max_value>
-static int AddSaturated(int val, int adder) {
- Fixed sum = static_cast<Fixed>(val) + static_cast<Fixed>(adder);
- if (sum > max_value)
- return max_value;
- if (sum < min_value)
- return min_value;
- return static_cast<int>(sum);
-}
-
-// AdjustVolume() does an in place audio sample change.
-bool AdjustVolume(void* buf,
- size_t buflen,
- int channels,
- int bytes_per_sample,
- float volume) {
- DCHECK(buf);
- if (volume < 0.0f || volume > 1.0f)
- return false;
- if (volume == 1.0f) {
- return true;
- } else if (volume == 0.0f) {
- memset(buf, 0, buflen);
- return true;
- }
- if (channels > 0 && channels <= 8 && bytes_per_sample > 0) {
- int sample_count = buflen / bytes_per_sample;
- const int fixed_volume = static_cast<int>(volume * 65536);
- if (bytes_per_sample == 1) {
- AdjustVolume<uint8, int32, 128>(reinterpret_cast<uint8*>(buf),
- sample_count,
- fixed_volume);
- return true;
- } else if (bytes_per_sample == 2) {
- AdjustVolume<int16, int32, 0>(reinterpret_cast<int16*>(buf),
- sample_count,
- fixed_volume);
- return true;
- } else if (bytes_per_sample == 4) {
- AdjustVolume<int32, int64, 0>(reinterpret_cast<int32*>(buf),
- sample_count,
- fixed_volume);
- return true;
- }
- }
- return false;
-}
-
-// TODO(enal): use template specialization and size-specific intrinsics.
-// Call is on the time-critical path, and by using SSE/AVX
-// instructions we can speed things up by ~4-8x, more for the case
-// when we have to adjust volume as well.
-template<class Format, class Fixed, int min_value, int max_value, int bias>
-static void MixStreams(Format* dst, Format* src, int count, float volume) {
- if (volume == 0.0f)
- return;
- if (volume == 1.0f) {
- // Most common case -- no need to adjust volume.
- for (int i = 0; i < count; ++i) {
- Fixed value = AddSaturated<Fixed, min_value, max_value>(dst[i] - bias,
- src[i] - bias);
- dst[i] = static_cast<Format>(value + bias);
- }
- } else {
- // General case -- have to adjust volume before mixing.
- const int fixed_volume = static_cast<int>(volume * 65536);
- for (int i = 0; i < count; ++i) {
- Fixed adjusted_src = ScaleChannel<Fixed>(src[i] - bias, fixed_volume);
- Fixed value = AddSaturated<Fixed, min_value, max_value>(dst[i] - bias,
- adjusted_src);
- dst[i] = static_cast<Format>(value + bias);
- }
- }
-}
-
-void MixStreams(void* dst,
- void* src,
- size_t buflen,
- int bytes_per_sample,
- float volume) {
- DCHECK(dst);
- DCHECK(src);
- DCHECK_GE(volume, 0.0f);
- DCHECK_LE(volume, 1.0f);
- switch (bytes_per_sample) {
- case 1:
- MixStreams<uint8, int32, kint8min, kint8max, 128>(
- static_cast<uint8*>(dst),
- static_cast<uint8*>(src),
- buflen,
- volume);
- break;
- case 2:
- DCHECK_EQ(0u, buflen % 2);
- MixStreams<int16, int32, kint16min, kint16max, 0>(
- static_cast<int16*>(dst),
- static_cast<int16*>(src),
- buflen / 2,
- volume);
- break;
- case 4:
- DCHECK_EQ(0u, buflen % 4);
- MixStreams<int32, int64, kint32min, kint32max, 0>(
- static_cast<int32*>(dst),
- static_cast<int32*>(src),
- buflen / 4,
- volume);
- break;
- default:
- NOTREACHED() << "Illegal bytes per sample";
- break;
- }
-}
-
-int GetAudioHardwareSampleRate() {
-#if defined(OS_MACOSX)
- // Hardware sample-rate on the Mac can be configured, so we must query.
- return AUAudioOutputStream::HardwareSampleRate();
-#elif defined(OS_WIN)
- if (!CoreAudioUtil::IsSupported()) {
-    // Fall back to the Windows Wave implementation on Windows XP or lower
-    // and use 48kHz as the default sample rate.
- return 48000;
- }
-
- // TODO(crogers): tune this rate for best possible WebAudio performance.
- // WebRTC works well at 48kHz and a buffer size of 480 samples will be used
- // for this case. Note that exclusive mode is experimental.
- const CommandLine* cmd_line = CommandLine::ForCurrentProcess();
- if (cmd_line->HasSwitch(switches::kEnableExclusiveAudio)) {
- // This sample rate will be combined with a buffer size of 256 samples
- // (see GetAudioHardwareBufferSize()), which corresponds to an output
- // delay of ~5.33ms.
- return 48000;
- }
-
- // Hardware sample-rate on Windows can be configured, so we must query.
- // TODO(henrika): improve possibility to specify an audio endpoint.
- // Use the default device (same as for Wave) for now to be compatible
- // or possibly remove the ERole argument completely until it is in use.
- return WASAPIAudioOutputStream::HardwareSampleRate(eConsole);
-#elif defined(OS_ANDROID)
- return 16000;
-#else
- // Hardware for Linux is nearly always 48KHz.
- // TODO(crogers) : return correct value in rare non-48KHz cases.
- return 48000;
-#endif
-}
-
-int GetAudioInputHardwareSampleRate(const std::string& device_id) {
- // TODO(henrika): add support for device selection on all platforms.
- // Only exists on Windows today.
-#if defined(OS_MACOSX)
- return AUAudioInputStream::HardwareSampleRate();
-#elif defined(OS_WIN)
- if (!CoreAudioUtil::IsSupported()) {
- return 48000;
- }
- return WASAPIAudioInputStream::HardwareSampleRate(device_id);
-#elif defined(OS_ANDROID)
- return 16000;
-#else
- return 48000;
-#endif
-}
-
-size_t GetAudioHardwareBufferSize() {
- int user_buffer_size = GetUserBufferSize();
- if (user_buffer_size)
- return user_buffer_size;
-
- // The sizes here were determined by experimentation and are roughly
-  // the lowest values (for low latency) that still allowed glitch-free
- // audio under high loads.
- //
- // For Mac OS X and Windows the chromium audio backend uses a low-latency
- // Core Audio API, so a low buffer size is possible. For Linux, further
- // tuning may be needed.
-#if defined(OS_MACOSX)
- return 128;
-#elif defined(OS_WIN)
- // Buffer size to use when a proper size can't be determined from the system.
- static const int kFallbackBufferSize = 4096;
-
- if (!CoreAudioUtil::IsSupported()) {
- // Fall back to Windows Wave implementation on Windows XP or lower
- // and assume 48kHz as default sample rate.
- return kFallbackBufferSize;
- }
-
- // TODO(crogers): tune this size to best possible WebAudio performance.
- // WebRTC always uses 10ms for Windows and does not call this method.
- // Note that exclusive mode is experimental.
- const CommandLine* cmd_line = CommandLine::ForCurrentProcess();
- if (cmd_line->HasSwitch(switches::kEnableExclusiveAudio)) {
- return 256;
- }
-
- // TODO(henrika): remove when the --enable-webaudio-input flag is no longer
- // utilized.
- if (cmd_line->HasSwitch(switches::kEnableWebAudioInput)) {
- AudioParameters params;
- HRESULT hr = CoreAudioUtil::GetPreferredAudioParameters(eRender, eConsole,
- ¶ms);
- return FAILED(hr) ? kFallbackBufferSize : params.frames_per_buffer();
- }
-
- // This call must be done on a COM thread configured as MTA.
- // TODO(tommi): http://code.google.com/p/chromium/issues/detail?id=103835.
- int mixing_sample_rate =
- WASAPIAudioOutputStream::HardwareSampleRate(eConsole);
-
- // Windows will return a sample rate of 0 when no audio output is available
- // (i.e. via RemoteDesktop with remote audio disabled), but we should never
- // return a buffer size of zero.
- if (mixing_sample_rate == 0)
- return kFallbackBufferSize;
-
-  // Use different buffer sizes depending on the sample rate. The existing
- // WASAPI implementation is tuned to provide the most stable callback
- // sequence using these combinations.
- if (mixing_sample_rate % 11025 == 0)
- // Use buffer size of ~10.15873 ms.
- return (112 * (mixing_sample_rate / 11025));
-
- if (mixing_sample_rate % 8000 == 0)
- // Use buffer size of 10ms.
- return (80 * (mixing_sample_rate / 8000));
-
- // Ensure we always return a buffer size which is somewhat appropriate.
- LOG(ERROR) << "Unknown sample rate " << mixing_sample_rate << " detected.";
- if (mixing_sample_rate > limits::kMinSampleRate)
- return (mixing_sample_rate / 100);
- return kFallbackBufferSize;
-#else
- return 2048;
-#endif
-}
-
-ChannelLayout GetAudioInputHardwareChannelLayout(const std::string& device_id) {
- // TODO(henrika): add support for device selection on all platforms.
- // Only exists on Windows today.
-#if defined(OS_MACOSX)
- return CHANNEL_LAYOUT_MONO;
-#elif defined(OS_WIN)
- if (!CoreAudioUtil::IsSupported()) {
- // Fall back to Windows Wave implementation on Windows XP or lower and
- // use stereo by default.
- return CHANNEL_LAYOUT_STEREO;
- }
- return WASAPIAudioInputStream::HardwareChannelCount(device_id) == 1 ?
- CHANNEL_LAYOUT_MONO : CHANNEL_LAYOUT_STEREO;
-#else
- return CHANNEL_LAYOUT_STEREO;
-#endif
-}
-
-// Computes a buffer size based on the given |sample_rate|. Must be used in
-// conjunction with AUDIO_PCM_LINEAR.
-size_t GetHighLatencyOutputBufferSize(int sample_rate) {
- int user_buffer_size = GetUserBufferSize();
- if (user_buffer_size)
- return user_buffer_size;
-
-  // TODO(vrk/crogers): The buffer sizes this function computes are probably
-  // overly conservative. However, reducing the buffer size to 2048-8192 bytes
- // caused crbug.com/108396. This computation should be revisited while making
- // sure crbug.com/108396 doesn't happen again.
-
- // The minimum number of samples in a hardware packet.
- // This value is selected so that we can handle down to 5khz sample rate.
- static const size_t kMinSamplesPerHardwarePacket = 1024;
-
- // The maximum number of samples in a hardware packet.
- // This value is selected so that we can handle up to 192khz sample rate.
- static const size_t kMaxSamplesPerHardwarePacket = 64 * 1024;
-
-  // This constant governs the hardware audio buffer size and should be chosen
-  // carefully.
- // This value is selected so that we have 8192 samples for 48khz streams.
- static const size_t kMillisecondsPerHardwarePacket = 170;
-
- // Select the number of samples that can provide at least
- // |kMillisecondsPerHardwarePacket| worth of audio data.
- size_t samples = kMinSamplesPerHardwarePacket;
- while (samples <= kMaxSamplesPerHardwarePacket &&
- samples * base::Time::kMillisecondsPerSecond <
- sample_rate * kMillisecondsPerHardwarePacket) {
- samples *= 2;
- }
- return samples;
-}
-
-#if defined(OS_WIN)
-
-int NumberOfWaveOutBuffers() {
- // Use 4 buffers for Vista, 3 for everyone else:
- // - The entire Windows audio stack was rewritten for Windows Vista and wave
- // out performance was degraded compared to XP.
- // - The regression was fixed in Windows 7 and most configurations will work
- // with 2, but some (e.g., some Sound Blasters) still need 3.
- // - Some XP configurations (even multi-processor ones) also need 3.
- return (base::win::GetVersion() == base::win::VERSION_VISTA) ? 4 : 3;
-}
-
-#endif
-
-} // namespace media
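The doubling loop in GetHighLatencyOutputBufferSize() above picks the smallest power-of-two multiple of 1024 samples that covers roughly 170 ms of audio. A standalone sketch of the same selection, detached from the command-line override and the Chromium headers:

#include <cstdio>

// Mirrors the selection loop of GetHighLatencyOutputBufferSize() above.
static int HighLatencyBufferSize(int sample_rate) {
  const int kMinSamplesPerHardwarePacket = 1024;
  const int kMaxSamplesPerHardwarePacket = 64 * 1024;
  const int kMillisecondsPerHardwarePacket = 170;

  // Double the packet size until it spans at least 170 ms at |sample_rate|.
  int samples = kMinSamplesPerHardwarePacket;
  while (samples <= kMaxSamplesPerHardwarePacket &&
         samples * 1000 < sample_rate * kMillisecondsPerHardwarePacket) {
    samples *= 2;
  }
  return samples;
}

int main() {
  // Expected: 8192 for 48 kHz, 16384 for 96 kHz, 1024 for 5 kHz.
  std::printf("%d %d %d\n", HighLatencyBufferSize(48000),
              HighLatencyBufferSize(96000), HighLatencyBufferSize(5000));
  return 0;
}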
diff --git a/src/media/audio/audio_util.h b/src/media/audio/audio_util.h
deleted file mode 100644
index 18cda41..0000000
--- a/src/media/audio/audio_util.h
+++ /dev/null
@@ -1,80 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef MEDIA_AUDIO_AUDIO_UTIL_H_
-#define MEDIA_AUDIO_AUDIO_UTIL_H_
-
-#include <string>
-
-#include "base/basictypes.h"
-#include "media/base/channel_layout.h"
-#include "media/base/media_export.h"
-
-namespace base {
-class SharedMemory;
-}
-
-namespace media {
-class AudioBus;
-
-// For all audio functions, 3 audio formats are supported:
-//   8-bit unsigned (0 to 255).
-//   16-bit signed (little endian).
-//   32-bit signed (little endian).
-
-// AdjustVolume() does a software volume adjustment of a sample buffer.
-// The samples are multiplied by the volume, which should range from
-// 0.0 (mute) to 1.0 (full volume).
-// Using software allows each audio and video stream to have its own volume
-// without affecting the master volume.
-// In the future the function may be used to adjust the sample format to
-// simplify hardware requirements and to support a wider variety of input
-// formats.
-// The buffer is modified in-place to avoid memory management, as this
-// function may be called in performance critical code.
-MEDIA_EXPORT bool AdjustVolume(void* buf,
- size_t buflen,
- int channels,
- int bytes_per_sample,
- float volume);
-
-// MixStreams() mixes 2 audio streams with the same sample rate and number of
-// samples, adjusting the volume of one of them.
-// Dst += Src * volume.
-MEDIA_EXPORT void MixStreams(void* dst,
- void* src,
- size_t buflen,
- int bytes_per_sample,
- float volume);
-
-// Returns the default audio output hardware sample-rate.
-MEDIA_EXPORT int GetAudioHardwareSampleRate();
-
-// Returns the audio input hardware sample-rate for the specified device.
-MEDIA_EXPORT int GetAudioInputHardwareSampleRate(
- const std::string& device_id);
-
-// Returns the optimal low-latency buffer size for the audio hardware.
-// This is the smallest buffer size the system can comfortably render
-// at without glitches. The buffer size is in sample-frames.
-MEDIA_EXPORT size_t GetAudioHardwareBufferSize();
-
-// Returns the channel layout for the specified audio input device.
-MEDIA_EXPORT ChannelLayout GetAudioInputHardwareChannelLayout(
- const std::string& device_id);
-
-// Computes a buffer size based on the given |sample_rate|. Must be used in
-// conjunction with AUDIO_PCM_LINEAR.
-MEDIA_EXPORT size_t GetHighLatencyOutputBufferSize(int sample_rate);
-
-#if defined(OS_WIN)
-
-// Returns number of buffers to be used by wave out.
-MEDIA_EXPORT int NumberOfWaveOutBuffers();
-
-#endif // defined(OS_WIN)
-
-} // namespace media
-
-#endif // MEDIA_AUDIO_AUDIO_UTIL_H_
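AdjustVolume() declared above works in 16.16 fixed point: the float volume becomes an integer scale factor, and each bias-removed sample is multiplied and shifted back down. A tiny sketch of that scaling on 16-bit samples (sample values are arbitrary):

#include <cstdint>
#include <cstdio>

// Same 16.16 fixed-point scaling as ScaleChannel<>() in the deleted .cc file.
static int16_t ScaleSample(int16_t sample, float volume) {
  const int fixed_volume = static_cast<int>(volume * 65536);
  return static_cast<int16_t>(
      (static_cast<int32_t>(sample) * fixed_volume) >> 16);
}

int main() {
  const int16_t samples[] = {-32768, -4, 0, 123, 32767};
  for (int16_t s : samples)
    std::printf("%d -> %d\n", s, ScaleSample(s, 0.5f));  // Halve the volume.
  return 0;
}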
diff --git a/src/media/audio/audio_util_unittest.cc b/src/media/audio/audio_util_unittest.cc
deleted file mode 100644
index 2643b99..0000000
--- a/src/media/audio/audio_util_unittest.cc
+++ /dev/null
@@ -1,164 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/basictypes.h"
-#include "media/audio/audio_util.h"
-#include "testing/gtest/include/gtest/gtest.h"
-
-// Number of samples in each audio array.
-static const size_t kNumberOfSamples = 4;
-
-namespace media {
-
-TEST(AudioUtilTest, AdjustVolume_u8) {
- // Test AdjustVolume() on 8 bit samples.
- uint8 samples_u8[kNumberOfSamples] = { 4, 0x40, 0x80, 0xff };
- uint8 expected_u8[kNumberOfSamples] = { (4 - 128) / 2 + 128,
- (0x40 - 128) / 2 + 128,
- (0x80 - 128) / 2 + 128,
- (0xff - 128) / 2 + 128 };
- bool result_u8 = media::AdjustVolume(samples_u8, sizeof(samples_u8),
- 1, // channels.
- sizeof(samples_u8[0]),
- 0.5f);
- EXPECT_TRUE(result_u8);
- int expected_test = memcmp(samples_u8, expected_u8, sizeof(expected_u8));
- EXPECT_EQ(0, expected_test);
-}
-
-TEST(AudioUtilTest, AdjustVolume_s16) {
- // Test AdjustVolume() on 16 bit samples.
- int16 samples_s16[kNumberOfSamples] = { -4, 0x40, -32768, 123 };
- int16 expected_s16[kNumberOfSamples] = { -1, 0x10, -8192, 30 };
- bool result_s16 = media::AdjustVolume(samples_s16, sizeof(samples_s16),
- 2, // channels.
- sizeof(samples_s16[0]),
- 0.25f);
- EXPECT_TRUE(result_s16);
- int expected_test = memcmp(samples_s16, expected_s16, sizeof(expected_s16));
- EXPECT_EQ(0, expected_test);
-}
-
-TEST(AudioUtilTest, AdjustVolume_s16_zero) {
- // Test AdjustVolume() on 16 bit samples.
- int16 samples_s16[kNumberOfSamples] = { -4, 0x40, -32768, 123 };
- int16 expected_s16[kNumberOfSamples] = { 0, 0, 0, 0 };
- bool result_s16 = media::AdjustVolume(samples_s16, sizeof(samples_s16),
- 2, // channels.
- sizeof(samples_s16[0]),
- 0.0f);
- EXPECT_TRUE(result_s16);
- int expected_test = memcmp(samples_s16, expected_s16, sizeof(expected_s16));
- EXPECT_EQ(0, expected_test);
-}
-
-TEST(AudioUtilTest, AdjustVolume_s16_one) {
- // Test AdjustVolume() on 16 bit samples.
- int16 samples_s16[kNumberOfSamples] = { -4, 0x40, -32768, 123 };
- int16 expected_s16[kNumberOfSamples] = { -4, 0x40, -32768, 123 };
- bool result_s16 = media::AdjustVolume(samples_s16, sizeof(samples_s16),
- 2, // channels.
- sizeof(samples_s16[0]),
- 1.0f);
- EXPECT_TRUE(result_s16);
- int expected_test = memcmp(samples_s16, expected_s16, sizeof(expected_s16));
- EXPECT_EQ(0, expected_test);
-}
-
-TEST(AudioUtilTest, AdjustVolume_s32) {
- // Test AdjustVolume() on 32 bit samples.
- int32 samples_s32[kNumberOfSamples] = { -4, 0x40, -32768, 123 };
- int32 expected_s32[kNumberOfSamples] = { -1, 0x10, -8192, 30 };
- bool result_s32 = media::AdjustVolume(samples_s32, sizeof(samples_s32),
- 4, // channels.
- sizeof(samples_s32[0]),
- 0.25f);
- EXPECT_TRUE(result_s32);
- int expected_test = memcmp(samples_s32, expected_s32, sizeof(expected_s32));
- EXPECT_EQ(0, expected_test);
-}
-
-TEST(AudioUtilTest, MixStreams_u8_QuarterVolume) {
- // Test MixStreams() on 8 bit samples.
- uint8 dst_u8[kNumberOfSamples] = { 14, 0x44, 0x80, 0xff };
- uint8 src_u8[kNumberOfSamples] = { 4, 0x40, 0x80, 0xff };
- uint8 expected_u8[kNumberOfSamples] = { 0, /* saturation */
- (0x44 - 128) + (0x40 - 128) / 4 + 128,
- (0x80 - 128) + (0x80 - 128) / 4 + 128,
- 0xff /* saturation */ };
- media::MixStreams(dst_u8, src_u8, sizeof(dst_u8), sizeof(src_u8[0]), 0.25f);
- int expected_test = memcmp(dst_u8, expected_u8, sizeof(expected_u8));
- EXPECT_EQ(0, expected_test);
-}
-
-TEST(AudioUtilTest, MixStreams_u8_FullVolume) {
- // Test MixStreams() on 8 bit samples.
- uint8 dst_u8[kNumberOfSamples] = { 44, 0x44, 0x80, 0xff };
- uint8 src_u8[kNumberOfSamples] = { 4, 0x40, 0x80, 0xff };
- uint8 expected_u8[kNumberOfSamples] = { 0, /* saturation */
- (0x44 - 128) + (0x40 - 128) + 128,
- (0x80 - 128) + (0x80 - 128) + 128,
- 0xff /* saturation */ };
- media::MixStreams(dst_u8, src_u8, sizeof(dst_u8), sizeof(src_u8[0]), 1.0f);
- int expected_test = memcmp(dst_u8, expected_u8, sizeof(expected_u8));
- EXPECT_EQ(0, expected_test);
-}
-
-TEST(AudioUtilTest, MixStreams_s16_QuarterVolume) {
- // Test MixStreams() on 16 bit samples.
- int16 dst_s16[kNumberOfSamples] = { -4, 0x40, -32760, 32760 };
- int16 src_s16[kNumberOfSamples] = { -4, 0x40, -123, 123 };
- int16 expected_s16[kNumberOfSamples] = { -5, 0x50, -32768, 32767 };
- media::MixStreams(dst_s16,
- src_s16,
- sizeof(dst_s16),
- sizeof(src_s16[0]),
- 0.25f);
- int expected_test = memcmp(dst_s16, expected_s16, sizeof(expected_s16));
- EXPECT_EQ(0, expected_test);
-}
-
-TEST(AudioUtilTest, MixStreams_s16_FullVolume) {
- // Test MixStreams() on 16 bit samples.
- int16 dst_s16[kNumberOfSamples] = { -4, 0x40, -32760, 32760 };
- int16 src_s16[kNumberOfSamples] = { -4, 0x40, -123, 123 };
- int16 expected_s16[kNumberOfSamples] = { -8, 0x80, -32768, 32767 };
- media::MixStreams(dst_s16,
- src_s16,
- sizeof(dst_s16),
- sizeof(src_s16[0]),
- 1.0f);
- int expected_test = memcmp(dst_s16, expected_s16, sizeof(expected_s16));
- EXPECT_EQ(0, expected_test);
-}
-
-TEST(AudioUtilTest, MixStreams_s32_QuarterVolume) {
- // Test MixStreams() on 32 bit samples.
- int32 dst_s32[kNumberOfSamples] = { -4, 0x40, -32768, 2147483640 };
- int32 src_s32[kNumberOfSamples] = { -4, 0x40, -32768, 123 };
- int32 expected_s32[kNumberOfSamples] = { -5, 0x50, -40960, 2147483647 };
- media::MixStreams(dst_s32,
- src_s32,
- sizeof(dst_s32),
- sizeof(src_s32[0]),
- 0.25f);
- int expected_test = memcmp(dst_s32, expected_s32, sizeof(expected_s32));
- EXPECT_EQ(0, expected_test);
-}
-
-TEST(AudioUtilTest, MixStreams_s32_FullVolume) {
- // Test MixStreams() on 32 bit samples.
- int32 dst_s32[kNumberOfSamples] = { -4, 0x40, -32768, 2147483640 };
- int32 src_s32[kNumberOfSamples] = { -4, 0x40, -32768, 123 };
- int32 expected_s32[kNumberOfSamples] = { -8, 0x80, -65536, 2147483647 };
- media::MixStreams(dst_s32,
- src_s32,
- sizeof(dst_s32),
- sizeof(src_s32[0]),
- 1.0);
- int expected_test = memcmp(dst_s32, expected_s32, sizeof(expected_s32));
- EXPECT_EQ(0, expected_test);
-}
-
-} // namespace media
diff --git a/src/media/audio/cross_process_notification.cc b/src/media/audio/cross_process_notification.cc
deleted file mode 100644
index 1806f77..0000000
--- a/src/media/audio/cross_process_notification.cc
+++ /dev/null
@@ -1,30 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "media/audio/cross_process_notification.h"
-
-#include "base/logging.h"
-#include "base/memory/scoped_ptr.h"
-
-CrossProcessNotification::CrossProcessNotification() {}
-
-CrossProcessNotification::WaitForMultiple::WaitForMultiple(
- const Notifications* notifications) {
- Reset(notifications);
-}
-
-int CrossProcessNotification::WaitForMultiple::Wait() {
- DCHECK(CalledOnValidThread());
- int ret = WaitMultiple(*notifications_, wait_offset_);
- wait_offset_ = (ret + 1) % notifications_->size();
- return ret;
-}
-
-void CrossProcessNotification::WaitForMultiple::Reset(
- const Notifications* notifications) {
- DCHECK(CalledOnValidThread());
- wait_offset_ = 0;
- notifications_ = notifications;
- DCHECK(!notifications_->empty());
-}
diff --git a/src/media/audio/cross_process_notification.h b/src/media/audio/cross_process_notification.h
deleted file mode 100644
index cae7435..0000000
--- a/src/media/audio/cross_process_notification.h
+++ /dev/null
@@ -1,172 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef MEDIA_AUDIO_CROSS_PROCESS_NOTIFICATION_H_
-#define MEDIA_AUDIO_CROSS_PROCESS_NOTIFICATION_H_
-
-#include <vector>
-
-#include "base/basictypes.h"
-#include "base/process.h"
-#include "base/threading/non_thread_safe.h"
-#include "media/base/media_export.h"
-
-#if defined(OS_WIN)
-#include "base/win/scoped_handle.h"
-#else
-#include "base/file_descriptor_posix.h"
-#include "base/sync_socket.h"
-#endif
-
-// A mechanism to synchronize access to a shared resource between two parties
-// when the usage pattern resembles that of two players playing a game of chess.
-// Each end has an instance of CrossProcessNotification and calls Signal() when
-// it has finished using the shared resource.
-// Before accessing the resource, it must call Wait() in order to know when the
-// other end has called Signal().
-//
-// Here's some pseudo code for how this class can be used:
-//
-// This method is used by both processes as it's a general way to use the
-// shared resource and then grant the privilege to the other process:
-//
-// void WriteToSharedMemory(CrossProcessNotification* notification,
-// SharedMemory* mem,
-// const char my_char) {
-// notification->Wait(); // Wait for the other process to yield access.
-// reinterpret_cast<char*>(mem->memory())[0] = my_char;
-// notification->Signal(); // Grant the other process access.
-// }
-//
-// Process A:
-//
-// class A {
-// public:
-// void Initialize(base::ProcessHandle process_b) {
-// mem_.CreateNamed("foo", false, 1024);
-//
-// CrossProcessNotification other;
-// CHECK(CrossProcessNotification::InitializePair(¬ification_, &other));
-// CrossProcessNotification::IPCHandle handle_1, handle_2;
-// CHECK(other.ShareToProcess(process_b, &handle_1, &handle_2));
-// // This could be implemented by using some IPC mechanism
-// // such as MessageLoop.
-// SendToProcessB(mem_, handle_1, handle_2);
-// // Allow process B the first chance to write to the memory:
-// notification_.Signal();
-// // Once B is done, we'll write 'A' to the shared memory.
-// WriteToSharedMemory(¬ification_, &mem_, 'A');
-// }
-//
-// CrossProcessNotification notification_;
-// SharedMemory mem_;
-// };
-//
-// Process B:
-//
-// class B {
-// public:
-// // Called when we receive the IPC message from A.
-// void Initialize(SharedMemoryHandle mem,
-// CrossProcessNotification::IPCHandle handle_1,
-// CrossProcessNotification::IPCHandle handle_2) {
-// mem_.reset(new SharedMemory(mem, false));
-// notification_.reset(new CrossProcessNotification(handle_1, handle_2));
-// WriteToSharedMemory(¬ification_, &mem_, 'B');
-// }
-//
-// CrossProcessNotification notification_;
-// scoped_ptr<SharedMemory> mem_;
-// };
-//
-class MEDIA_EXPORT CrossProcessNotification {
- public:
-#if defined(OS_WIN)
- typedef HANDLE IPCHandle;
-#else
- typedef base::FileDescriptor IPCHandle;
-#endif
-
- typedef std::vector<CrossProcessNotification*> Notifications;
-
- // Default ctor. Initializes a NULL notification. User must call
- // InitializePair() to initialize the instance along with a connected one.
- CrossProcessNotification();
-
- // Ctor for the user that does not call InitializePair but instead receives
- // handles from the one that did. These handles come from a call to
- // ShareToProcess.
- CrossProcessNotification(IPCHandle handle_1, IPCHandle handle_2);
- ~CrossProcessNotification();
-
- // Raises a signal that the shared resource now can be accessed by the other
- // party.
-  // NOTE: Calling Signal() more than once without calling Wait() in between
-  // is not a supported scenario and will result in undefined behavior (which
-  // differs by platform).
- void Signal();
-
- // Waits for the other party to finish using the shared resource.
- // NOTE: As with Signal(), you must not call Wait() more than once without
- // calling Signal() in between.
- void Wait();
-
- bool IsValid() const;
-
- // Copies the internal handles to the output parameters, |handle_1| and
- // |handle_2|. The operation can fail, so the caller must be prepared to
- // handle that case.
- bool ShareToProcess(base::ProcessHandle process, IPCHandle* handle_1,
- IPCHandle* handle_2);
-
- // Initializes a pair of CrossProcessNotification instances. Note that this
- // can fail (e.g. due to EMFILE on Linux).
- static bool InitializePair(CrossProcessNotification* a,
- CrossProcessNotification* b);
-
- // Use an instance of this class when you have to repeatedly wait for multiple
- // notifications on the same thread. The class will store information about
- // which notification was last signaled and try to distribute the signals so
- // that all notifications get a chance to be processed in times of high load
- // and a busy one won't starve the others.
- // TODO(tommi): Support a way to abort the wait.
- class MEDIA_EXPORT WaitForMultiple :
- public NON_EXPORTED_BASE(base::NonThreadSafe) {
- public:
- // Caller must make sure that the lifetime of the array is greater than
- // that of the WaitForMultiple instance.
- explicit WaitForMultiple(const Notifications* notifications);
-
- // Waits for any of the notifications to be signaled. Returns the 0 based
- // index of a signaled notification.
- int Wait();
-
- // Call when the array changes. This should be called on the same thread
- // as Wait() is called on and the array must never change while a Wait()
- // is in progress.
- void Reset(const Notifications* notifications);
-
- private:
- const Notifications* notifications_;
- size_t wait_offset_;
- };
-
- private:
- // Only called by the WaitForMultiple class. See documentation
- // for WaitForMultiple and comments inside WaitMultiple for details.
- static int WaitMultiple(const Notifications& notifications,
- size_t wait_offset);
-
-#if defined(OS_WIN)
- base::win::ScopedHandle mine_;
- base::win::ScopedHandle other_;
-#else
- typedef base::CancelableSyncSocket SocketClass;
- SocketClass socket_;
-#endif
-
- DISALLOW_COPY_AND_ASSIGN(CrossProcessNotification);
-};
-
-#endif // MEDIA_AUDIO_CROSS_PROCESS_NOTIFICATION_H_
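Reader's note (not part of the diff): a minimal sketch of the strict Wait()/Signal() alternation the deleted header documents above, assuming the CrossProcessNotification API as declared there; the PingPong() helper and its parameters are illustrative only.

// Hypothetical illustration of the hand-off protocol described in the header.
// Each side must strictly alternate Wait() and Signal(); calling either twice
// in a row is undefined behavior per the comments above.
void PingPong(CrossProcessNotification* ours, int* shared_value, int rounds) {
  for (int i = 0; i < rounds; ++i) {
    ours->Wait();       // Block until the peer grants access.
    ++(*shared_value);  // We now have exclusive access to the shared data.
    ours->Signal();     // Hand access back to the peer.
  }
}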
diff --git a/src/media/audio/cross_process_notification_posix.cc b/src/media/audio/cross_process_notification_posix.cc
deleted file mode 100644
index 070ef06..0000000
--- a/src/media/audio/cross_process_notification_posix.cc
+++ /dev/null
@@ -1,114 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "media/audio/cross_process_notification.h"
-
-#include <errno.h>
-#include <sys/poll.h>
-
-#include "base/logging.h"
-#include "base/memory/scoped_ptr.h"
-#include "base/file_descriptor_posix.h"
-
-CrossProcessNotification::~CrossProcessNotification() {}
-
-CrossProcessNotification::CrossProcessNotification(IPCHandle handle_1,
- IPCHandle handle_2)
- : socket_(handle_1.fd) {
- DCHECK_NE(handle_1.fd, -1);
- DCHECK_EQ(handle_2.fd, -1);
- DCHECK(IsValid());
-}
-
-void CrossProcessNotification::Signal() {
- DCHECK(IsValid());
- char signal = 1;
- size_t bytes = socket_.Send(&signal, sizeof(signal));
- DCHECK_EQ(bytes, 1U) << "errno: " << errno;
-}
-
-void CrossProcessNotification::Wait() {
- DCHECK(IsValid());
- char signal = 0;
- size_t bytes = socket_.Receive(&signal, sizeof(signal));
- DCHECK_EQ(bytes, 1U) << "errno: " << errno;
- DCHECK_EQ(signal, 1);
-}
-
-bool CrossProcessNotification::IsValid() const {
- return socket_.handle() != SocketClass::kInvalidHandle;
-}
-
-bool CrossProcessNotification::ShareToProcess(base::ProcessHandle process,
- IPCHandle* handle_1,
- IPCHandle* handle_2) {
- DCHECK(IsValid());
- handle_1->fd = socket_.handle();
- handle_1->auto_close = false;
- handle_2->fd = -1;
- return true;
-}
-
-// static
-bool CrossProcessNotification::InitializePair(CrossProcessNotification* a,
- CrossProcessNotification* b) {
- DCHECK(!a->IsValid());
- DCHECK(!b->IsValid());
-
- bool ok = SocketClass::CreatePair(&a->socket_, &b->socket_);
-
- DLOG_IF(WARNING, !ok) << "failed to create socket: " << errno;
- DCHECK(!ok || a->IsValid());
- DCHECK(!ok || b->IsValid());
- return ok;
-}
-
-// static
-int CrossProcessNotification::WaitMultiple(const Notifications& notifications,
- size_t wait_offset) {
- DCHECK_LT(wait_offset, notifications.size());
-
- for (size_t i = 0; i < notifications.size(); ++i) {
- DCHECK(notifications[i]->IsValid());
- }
-
- // Below, we always check the |revents| of the first socket in the array
- // and return the index of that socket if set. This can cause sockets
- // that come later in the array to starve when the first sockets are
- // very busy. So to avoid the starving problem, we use the |wait_offset|
- // variable to split up the array so that the last socket to be signaled
- // becomes the last socket in the array and all the other sockets will have
- // priority the next time WaitMultiple is called.
- scoped_array<struct pollfd> sockets(new struct pollfd[notifications.size()]);
- memset(&sockets[0], 0, notifications.size() * sizeof(sockets[0]));
- size_t index = 0;
- for (size_t i = wait_offset; i < notifications.size(); ++i) {
- struct pollfd& fd = sockets[index++];
- fd.events = POLLIN;
- fd.fd = notifications[i]->socket_.handle();
- }
-
- for (size_t i = 0; i < wait_offset; ++i) {
- struct pollfd& fd = sockets[index++];
- fd.events = POLLIN;
- fd.fd = notifications[i]->socket_.handle();
- }
- DCHECK_EQ(index, notifications.size());
-
- int err = poll(&sockets[0], notifications.size(), -1);
- if (err != -1) {
- for (size_t i = 0; i < notifications.size(); ++i) {
- if (sockets[i].revents) {
- size_t ret = (i + wait_offset) % notifications.size();
- DCHECK_EQ(sockets[i].fd, notifications[ret]->socket_.handle());
- notifications[ret]->Wait();
- return ret;
- }
- }
- }
- // Either poll() failed or we failed to find a single socket that was
- // signaled. Either way continuing will result in undefined behavior.
- LOG(FATAL) << "poll() failed: " << errno;
- return -1;
-}
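As a standalone sketch of the rotation trick WaitMultiple() uses above (POSIX assumed; FairPoll() is a hypothetical name): the pollfd array starts at |wait_offset| so the most recently signaled socket is checked last and cannot starve the rest.

#include <poll.h>

#include <cstddef>
#include <vector>

// Returns the original index of a readable fd, giving priority to the fds
// that follow |wait_offset| (the slot signaled last time), or -1 on error.
int FairPoll(const std::vector<int>& fds, size_t wait_offset) {
  if (fds.empty())
    return -1;
  std::vector<struct pollfd> pfds(fds.size());
  for (size_t i = 0; i < fds.size(); ++i) {
    pfds[i].fd = fds[(i + wait_offset) % fds.size()];  // Rotated order.
    pfds[i].events = POLLIN;
  }
  if (poll(&pfds[0], static_cast<nfds_t>(pfds.size()), -1) <= 0)
    return -1;
  for (size_t i = 0; i < pfds.size(); ++i) {
    if (pfds[i].revents)  // First readable slot in rotated order...
      return static_cast<int>((i + wait_offset) % fds.size());  // ...mapped back.
  }
  return -1;
}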
diff --git a/src/media/audio/cross_process_notification_unittest.cc b/src/media/audio/cross_process_notification_unittest.cc
deleted file mode 100644
index d1fbead..0000000
--- a/src/media/audio/cross_process_notification_unittest.cc
+++ /dev/null
@@ -1,461 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/compiler_specific.h"
-#include "base/logging.h"
-#include "base/shared_memory.h"
-#include "base/stl_util.h"
-#include "base/test/multiprocess_test.h"
-#include "base/threading/platform_thread.h"
-#include "media/audio/cross_process_notification.h"
-#include "testing/gtest/include/gtest/gtest.h"
-#include "testing/multiprocess_func_list.h"
-
-#include <utility> // NOLINT
-
-namespace {
-
-// Initializes (ctor) and deletes (dtor) two vectors of pairs of
-// CrossProcessNotification instances.
-class NotificationsOwner {
- public:
- // Attempts to create up to |number_of_pairs| number of pairs. Call size()
- // after construction to find out how many pairs were actually created.
- explicit NotificationsOwner(size_t number_of_pairs) {
- CreateMultiplePairs(number_of_pairs);
- }
- ~NotificationsOwner() {
- STLDeleteElements(&a_);
- STLDeleteElements(&b_);
- }
-
- size_t size() const {
- DCHECK_EQ(a_.size(), b_.size());
- return a_.size();
- }
-
- const CrossProcessNotification::Notifications& a() { return a_; }
- const CrossProcessNotification::Notifications& b() { return b_; }
-
- private:
- void CreateMultiplePairs(size_t count) {
- a_.resize(count);
- b_.resize(count);
- size_t i = 0;
- for (; i < count; ++i) {
- a_[i] = new CrossProcessNotification();
- b_[i] = new CrossProcessNotification();
- if (!CrossProcessNotification::InitializePair(a_[i], b_[i])) {
- LOG(WARNING) << "InitializePair failed at " << i;
- delete a_[i];
- delete b_[i];
- break;
- }
- }
- a_.resize(i);
- b_.resize(i);
- }
-
- CrossProcessNotification::Notifications a_;
- CrossProcessNotification::Notifications b_;
-};
-
-// A simple thread that we'll run two instances of. Both threads get a pointer
-// to the same |shared_data| and use a CrossProcessNotification to control when
-// each thread can read/write.
-class SingleNotifierWorker : public base::PlatformThread::Delegate {
- public:
- SingleNotifierWorker(size_t* shared_data, size_t repeats,
- CrossProcessNotification* notifier)
- : shared_data_(shared_data), repeats_(repeats),
- notifier_(notifier) {
- }
- virtual ~SingleNotifierWorker() {}
-
- // base::PlatformThread::Delegate:
- virtual void ThreadMain() OVERRIDE {
- for (size_t i = 0; i < repeats_; ++i) {
- notifier_->Wait();
- ++(*shared_data_);
- notifier_->Signal();
- }
- }
-
- private:
- size_t* shared_data_;
- size_t repeats_;
- CrossProcessNotification* notifier_;
- DISALLOW_COPY_AND_ASSIGN(SingleNotifierWorker);
-};
-
-// Similar to SingleNotifierWorker, except each instance of this class will
-// have >1 instances of CrossProcessNotification to Wait/Signal and an equal
-// amount of |shared_data| that the notifiers control access to.
-class MultiNotifierWorker : public base::PlatformThread::Delegate {
- public:
- MultiNotifierWorker(size_t* shared_data, size_t repeats,
- const CrossProcessNotification::Notifications* notifiers)
- : shared_data_(shared_data), repeats_(repeats),
- notifiers_(notifiers) {
- }
- virtual ~MultiNotifierWorker() {}
-
- // base::PlatformThread::Delegate:
- virtual void ThreadMain() OVERRIDE {
- CrossProcessNotification::WaitForMultiple waiter(notifiers_);
- for (size_t i = 0; i < repeats_; ++i) {
- int signaled = waiter.Wait();
- ++shared_data_[signaled];
- (*notifiers_)[signaled]->Signal();
- }
- }
-
- private:
- size_t* shared_data_;
- size_t repeats_;
- const CrossProcessNotification::Notifications* notifiers_;
- DISALLOW_COPY_AND_ASSIGN(MultiNotifierWorker);
-};
-
-// A fixed array of bool flags. Each flag uses 1 bit. Use sizeof(FlagArray)
-// to determine how much memory you need. The number of flags will therefore
-// be sizeof(FlagArray) * 8.
-// We use 'struct' to signify that this structure represents compiler
-// independent structured data. I.e. you must be able to map this class
-// to a piece of shared memory of size sizeof(FlagArray) and be able to
-// use the class. No vtables etc.
-// TODO(tommi): Move this to its own header when we start using it for signaling
-// audio devices. As is, it's just here for perf comparison against the
-// "multiple notifiers" approach.
-struct FlagArray {
- public:
- FlagArray() : flags_() {}
-
- bool is_set(size_t index) const {
- return (flags_[index >> 5] & (1 << (index & 31)));
- }
-
- void set(size_t index) {
- flags_[index >> 5] |= (1U << (static_cast<uint32>(index) & 31));
- }
-
- void clear(size_t index) {
- flags_[index >> 5] &= ~(1U << (static_cast<uint32>(index) & 31));
- }
-
- // Returns the number of flags that can be set/checked.
- size_t size() const { return sizeof(flags_) * 8; }
-
- private:
- // 256 * 32 = 8192 flags in 1KB.
- uint32 flags_[256];
- DISALLOW_COPY_AND_ASSIGN(FlagArray);
-};
-
-class MultiNotifierWorkerFlagArray : public base::PlatformThread::Delegate {
- public:
- MultiNotifierWorkerFlagArray(size_t count, FlagArray* signals,
- size_t* shared_data, size_t repeats,
- CrossProcessNotification* notifier)
- : count_(count), signals_(signals), shared_data_(shared_data),
- repeats_(repeats), notifier_(notifier) {
- }
- virtual ~MultiNotifierWorkerFlagArray() {}
-
- // base::PlatformThread::Delegate:
- virtual void ThreadMain() OVERRIDE {
- for (size_t i = 0; i < repeats_; ++i) {
- notifier_->Wait();
- for (size_t s = 0; s < count_; ++s) {
- if (signals_->is_set(s)) {
- ++shared_data_[s];
- // We don't clear the flag here but simply leave it signaled because
- // we want the other thread to also increment this variable.
- }
- }
- notifier_->Signal();
- }
- }
-
- private:
- size_t count_;
- FlagArray* signals_;
- size_t* shared_data_;
- size_t repeats_;
- CrossProcessNotification* notifier_;
- DISALLOW_COPY_AND_ASSIGN(MultiNotifierWorkerFlagArray);
-};
-
-} // end namespace
-
-TEST(CrossProcessNotification, FlagArray) {
- FlagArray flags;
- EXPECT_GT(flags.size(), 1000U);
- for (size_t i = 0; i < flags.size(); ++i) {
- EXPECT_FALSE(flags.is_set(i));
- flags.set(i);
- EXPECT_TRUE(flags.is_set(i));
- flags.clear(i);
- EXPECT_FALSE(flags.is_set(i));
- }
-}
-
-// Initializes two notifiers, signals each one, and makes sure the other's
-// wait is satisfied.
-TEST(CrossProcessNotification, Basic) {
- CrossProcessNotification a, b;
- ASSERT_TRUE(CrossProcessNotification::InitializePair(&a, &b));
- EXPECT_TRUE(a.IsValid());
- EXPECT_TRUE(b.IsValid());
-
- a.Signal();
- b.Wait();
-
- b.Signal();
- a.Wait();
-}
-
-// Spins two worker threads, each with their own CrossProcessNotification
-// that they use to read and write from a shared memory buffer.
-// Disabled as it trips up the TSAN bot (false positive since TSAN doesn't
-// recognize sockets as being a synchronization primitive).
-TEST(CrossProcessNotification, DISABLED_TwoThreads) {
- CrossProcessNotification a, b;
- ASSERT_TRUE(CrossProcessNotification::InitializePair(&a, &b));
-
- size_t data = 0;
- const size_t kRepeats = 10000;
- SingleNotifierWorker worker1(&data, kRepeats, &a);
- SingleNotifierWorker worker2(&data, kRepeats, &b);
- base::PlatformThreadHandle thread1, thread2;
- base::PlatformThread::Create(0, &worker1, &thread1);
- base::PlatformThread::Create(0, &worker2, &thread2);
-
- // Start the first thread. They should ping pong a few times and take turns
- // incrementing the shared variable and never step on each other's toes.
- a.Signal();
-
- base::PlatformThread::Join(thread1);
- base::PlatformThread::Join(thread2);
-
- EXPECT_EQ(kRepeats * 2, data);
-}
-
-// Uses a pair of threads to access up to 1000 pieces of synchronized shared
-// data. On regular dev machines, the number of notifiers should be 1000, but on
-// mac and linux bots, the number will be smaller due to the RLIMIT_NOFILE
-// limit. Specifically, linux will have this limit at 1024 which means for this
-// test that the max number of notifiers will be in the range 500-512. On Mac
-// the limit is 256, so |count| will be ~120. Oh, and raising the limit via
-// setrlimit() won't work.
-// DISABLED since the distribution won't be accurate when run on valgrind.
-TEST(CrossProcessNotification, DISABLED_ThousandNotifiersTwoThreads) {
- const size_t kCount = 1000;
- NotificationsOwner pairs(kCount);
- size_t data[kCount] = {0};
- // We use a multiple of the count so that the division in the check below
- // will be nice and round.
- size_t repeats = pairs.size() * 1;
-
- MultiNotifierWorker worker_1(&data[0], repeats, &pairs.a());
- MultiNotifierWorker worker_2(&data[0], repeats, &pairs.b());
- base::PlatformThreadHandle thread_1, thread_2;
- base::PlatformThread::Create(0, &worker_1, &thread_1);
- base::PlatformThread::Create(0, &worker_2, &thread_2);
-
- for (size_t i = 0; i < pairs.size(); ++i)
- pairs.a()[i]->Signal();
-
- base::PlatformThread::Join(thread_1);
- base::PlatformThread::Join(thread_2);
-
- size_t expected_total = pairs.size() * 2;
- size_t total = 0;
- for (size_t i = 0; i < pairs.size(); ++i) {
- // The CrossProcessNotification::WaitForMultiple class should have ensured
- // that all notifiers had the same quality of service.
- EXPECT_EQ(expected_total / pairs.size(), data[i]);
- total += data[i];
- }
- EXPECT_EQ(expected_total, total);
-}
-
-// Functionally equivalent (as far as the shared data goes) to the
-// ThousandNotifiersTwoThreads test but uses a single pair of notifiers +
-// FlagArray for the 1000 signals. This approach is significantly faster.
-// Disabled as it trips up the TSAN bot - "Possible data race during write of
-// size 4" (the flag array).
-TEST(CrossProcessNotification, DISABLED_TwoNotifiersTwoThreads1000Signals) {
- CrossProcessNotification a, b;
- ASSERT_TRUE(CrossProcessNotification::InitializePair(&a, &b));
-
- const size_t kCount = 1000;
- FlagArray signals;
- ASSERT_GE(signals.size(), kCount);
- size_t data[kCount] = {0};
-
- // Since this algorithm checks all events each time the notifier is
- // signaled, |repeat| doesn't mean the same thing here as it does in
- // ThousandNotifiersTwoThreads. 1 repeat here is the same as kCount
- // repeats in ThousandNotifiersTwoThreads.
- size_t repeats = 1;
- MultiNotifierWorkerFlagArray worker1(kCount, &signals, &data[0], repeats, &a);
- MultiNotifierWorkerFlagArray worker2(kCount, &signals, &data[0], repeats, &b);
- base::PlatformThreadHandle thread1, thread2;
- base::PlatformThread::Create(0, &worker1, &thread1);
- base::PlatformThread::Create(0, &worker2, &thread2);
-
- for (size_t i = 0; i < kCount; ++i)
- signals.set(i);
- a.Signal();
-
- base::PlatformThread::Join(thread1);
- base::PlatformThread::Join(thread2);
-
- size_t expected_total = kCount * 2;
- size_t total = 0;
- for (size_t i = 0; i < kCount; ++i) {
- // Since for each signal, we process all signaled events, the shared data
- // variables should all be equal.
- EXPECT_EQ(expected_total / kCount, data[i]);
- total += data[i];
- }
- EXPECT_EQ(expected_total, total);
-}
-
-// Test the maximum number of notifiers without spinning further wait
-// threads on Windows. This test assumes we can always create 64 pairs and
-// bails if we can't.
-TEST(CrossProcessNotification, MultipleWaits64) {
- const size_t kCount = 64;
- NotificationsOwner pairs(kCount);
- ASSERT_TRUE(pairs.size() == kCount);
-
- CrossProcessNotification::WaitForMultiple waiter(&pairs.b());
- for (size_t i = 0; i < kCount; ++i) {
- pairs.a()[i]->Signal();
- int index = waiter.Wait();
- EXPECT_EQ(i, static_cast<size_t>(index));
- }
-}
-
-// Tests waiting for more notifiers than the OS supports on one thread.
-// The test will create at most 1000 pairs, but on mac/linux bots the actual
-// number will be lower. See comment about the RLIMIT_NOFILE limit above for
-// more details.
-// DISABLED since the distribution won't be accurate when run on valgrind.
-TEST(CrossProcessNotification, DISABLED_MultipleWaits1000) {
-  // 1000 notifiers require 16 threads on Windows, including the current
- // one, to perform the wait operation.
- const size_t kCount = 1000;
- NotificationsOwner pairs(kCount);
-
- for (size_t i = 0; i < pairs.size(); ++i) {
- pairs.a()[i]->Signal();
- // To disable the load distribution algorithm and force the extra worker
- // thread(s) to catch the signaled event, we define the |waiter| inside
- // the loop.
- CrossProcessNotification::WaitForMultiple waiter(&pairs.b());
- int index = waiter.Wait();
- EXPECT_EQ(i, static_cast<size_t>(index));
- }
-}
-
-class CrossProcessNotificationMultiProcessTest : public base::MultiProcessTest {
-};
-
-namespace {
-
-// A very crude IPC mechanism that we use to set up the spawned child process
-// and the parent process.
-struct CrudeIpc {
- uint8 ready;
- CrossProcessNotification::IPCHandle handle_1;
- CrossProcessNotification::IPCHandle handle_2;
-};
-
-#if defined(OS_POSIX)
-const int kPosixChildSharedMem = 30;
-#else
-const char kSharedMemName[] = "CrossProcessNotificationMultiProcessTest";
-#endif
-
-const size_t kSharedMemSize = 1024;
-
-} // namespace
-
-// The main routine of the child process. Waits for the parent process
-// to copy handles over to the child and then uses a CrossProcessNotification to
-// wait and signal to the parent process.
-MULTIPROCESS_TEST_MAIN(CrossProcessNotificationChildMain) {
-#if defined(OS_POSIX)
- base::SharedMemory mem(
- base::SharedMemoryHandle(kPosixChildSharedMem, true /* auto close */),
- false);
-#else
- base::SharedMemory mem;
- CHECK(mem.CreateNamed(kSharedMemName, true, kSharedMemSize));
-#endif
-
- CHECK(mem.Map(kSharedMemSize));
- CrudeIpc* ipc = reinterpret_cast<CrudeIpc*>(mem.memory());
-
- while (!ipc->ready)
- base::PlatformThread::Sleep(base::TimeDelta::FromMilliseconds(10));
-
- CrossProcessNotification notifier(ipc->handle_1, ipc->handle_2);
- notifier.Wait();
- notifier.Signal();
-
- return 0;
-}
-
-// Spawns a new process and hands a CrossProcessNotification instance to the
-// new process. Once that's done, it waits for the child process to signal
-// its end and quits.
-TEST_F(CrossProcessNotificationMultiProcessTest, Basic) {
- CrossProcessNotification a, b;
- ASSERT_TRUE(CrossProcessNotification::InitializePair(&a, &b));
- EXPECT_TRUE(a.IsValid());
- EXPECT_TRUE(b.IsValid());
-
- base::SharedMemory mem;
-
-#if defined(OS_POSIX)
- ASSERT_TRUE(mem.CreateAndMapAnonymous(kSharedMemSize));
-#else
- mem.Delete(kSharedMemName); // In case a previous run was unsuccessful.
- ASSERT_TRUE(mem.CreateNamed(kSharedMemName, false, kSharedMemSize));
- ASSERT_TRUE(mem.Map(kSharedMemSize));
-#endif
-
- CrudeIpc* ipc = reinterpret_cast<CrudeIpc*>(mem.memory());
- ipc->ready = false;
-
-#if defined(OS_POSIX)
- const int kPosixChildSocket = 20;
- EXPECT_TRUE(b.ShareToProcess(
- base::kNullProcessHandle, &ipc->handle_1, &ipc->handle_2));
- base::FileHandleMappingVector fd_mapping_vec;
- fd_mapping_vec.push_back(std::make_pair(ipc->handle_1.fd, kPosixChildSocket));
- fd_mapping_vec.push_back(
- std::make_pair(mem.handle().fd, kPosixChildSharedMem));
- ipc->handle_1.fd = kPosixChildSocket;
- base::ProcessHandle process = SpawnChild("CrossProcessNotificationChildMain",
- fd_mapping_vec, false);
-#else
- base::ProcessHandle process = SpawnChild("CrossProcessNotificationChildMain",
- false);
- EXPECT_TRUE(b.ShareToProcess(process, &ipc->handle_1, &ipc->handle_2));
-#endif
-
- ipc->ready = true;
-
- a.Signal();
- a.Wait();
-
- int exit_code = -1;
- base::WaitForExitCode(process, &exit_code);
- EXPECT_EQ(0, exit_code);
-}
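For reference, the FlagArray bit arithmetic exercised by the deleted tests above, as a self-contained sketch (names illustrative): index >> 5 selects the 32-bit word and index & 31 selects the bit within it, giving 256 * 32 = 8192 flags in 1 KB.

#include <cstdint>
#include <cstdio>

int main() {
  uint32_t flags[256] = {0};                   // 8192 one-bit flags in 1 KB.
  const unsigned index = 1000;                 // Word 31 (1000 >> 5), bit 8.
  flags[index >> 5] |= 1u << (index & 31);     // set(1000)
  bool is_set = (flags[index >> 5] & (1u << (index & 31))) != 0;
  flags[index >> 5] &= ~(1u << (index & 31));  // clear(1000)
  std::printf("flag 1000 was set: %d\n", is_set);
  return 0;
}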
diff --git a/src/media/audio/cross_process_notification_win.cc b/src/media/audio/cross_process_notification_win.cc
deleted file mode 100644
index b454cbf..0000000
--- a/src/media/audio/cross_process_notification_win.cc
+++ /dev/null
@@ -1,268 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "media/audio/cross_process_notification.h"
-
-#include "base/logging.h"
-#include "base/memory/scoped_ptr.h"
-#include "base/threading/platform_thread.h"
-#include "base/win/scoped_handle.h"
-
-CrossProcessNotification::~CrossProcessNotification() {}
-
-CrossProcessNotification::CrossProcessNotification(IPCHandle handle_1,
- IPCHandle handle_2)
- : mine_(handle_1), other_(handle_2) {
- DCHECK(IsValid());
-}
-
-void CrossProcessNotification::Signal() {
- DCHECK(IsValid());
- DCHECK_EQ(::WaitForSingleObject(mine_, 0), static_cast<DWORD>(WAIT_TIMEOUT))
- << "Are you calling Signal() without calling Wait() first?";
- BOOL ok = ::SetEvent(mine_);
- CHECK(ok);
-}
-
-void CrossProcessNotification::Wait() {
- DCHECK(IsValid());
- DWORD wait = ::WaitForSingleObject(other_, INFINITE);
- DCHECK_EQ(wait, WAIT_OBJECT_0);
- BOOL ok = ::ResetEvent(other_);
- CHECK(ok);
-}
-
-bool CrossProcessNotification::IsValid() const {
- return mine_.IsValid() && other_.IsValid();
-}
-
-bool CrossProcessNotification::ShareToProcess(base::ProcessHandle process,
- IPCHandle* handle_1,
- IPCHandle* handle_2) {
- DCHECK(IsValid());
- HANDLE our_process = ::GetCurrentProcess();
- if (!::DuplicateHandle(our_process, mine_, process, handle_1, 0, FALSE,
- DUPLICATE_SAME_ACCESS)) {
- return false;
- }
-
- if (!::DuplicateHandle(our_process, other_, process, handle_2, 0, FALSE,
- DUPLICATE_SAME_ACCESS)) {
- // In case we're sharing to ourselves, we can close the handle, but
- // if the target process is a different process, we do nothing.
- if (process == our_process)
- ::CloseHandle(*handle_1);
- *handle_1 = NULL;
- return false;
- }
-
- return true;
-}
-
-// static
-bool CrossProcessNotification::InitializePair(CrossProcessNotification* a,
- CrossProcessNotification* b) {
- DCHECK(!a->IsValid());
- DCHECK(!b->IsValid());
-
- bool success = false;
-
- // Create two manually resettable events and give each party a handle
- // to both events.
- HANDLE event_a = ::CreateEvent(NULL, TRUE, FALSE, NULL);
- HANDLE event_b = ::CreateEvent(NULL, TRUE, FALSE, NULL);
- if (event_a && event_b) {
- a->mine_.Set(event_a);
- a->other_.Set(event_b);
- success = a->ShareToProcess(GetCurrentProcess(), &event_a, &event_b);
- if (success) {
- b->mine_.Set(event_b);
- b->other_.Set(event_a);
- } else {
- a->mine_.Close();
- a->other_.Close();
- }
- } else {
- if (event_a)
- ::CloseHandle(event_a);
- if (event_b)
- ::CloseHandle(event_b);
- }
-
- DCHECK(!success || a->IsValid());
- DCHECK(!success || b->IsValid());
-
- return success;
-}
-
-namespace {
-class ExtraWaitThread : public base::PlatformThread::Delegate {
- public:
- ExtraWaitThread(HANDLE stop, HANDLE* events, size_t count,
- int* signaled_event)
- : stop_(stop), events_(events), count_(count),
- signaled_event_(signaled_event) {
- *signaled_event_ = -1;
- }
- virtual ~ExtraWaitThread() {}
-
- virtual void ThreadMain() OVERRIDE {
- // Store the |stop_| event as the first event.
- HANDLE events[MAXIMUM_WAIT_OBJECTS] = { stop_ };
- HANDLE next_thread = NULL;
- DWORD event_count = MAXIMUM_WAIT_OBJECTS;
- int thread_signaled_event = -1;
- scoped_ptr<ExtraWaitThread> extra_wait_thread;
- if (count_ > (MAXIMUM_WAIT_OBJECTS - 1)) {
- std::copy(&events_[0], &events_[MAXIMUM_WAIT_OBJECTS - 2], &events[1]);
-
- extra_wait_thread.reset(new ExtraWaitThread(stop_,
- &events_[MAXIMUM_WAIT_OBJECTS - 2],
- count_ - (MAXIMUM_WAIT_OBJECTS - 2),
- &thread_signaled_event));
- base::PlatformThread::Create(0, extra_wait_thread.get(), &next_thread);
-
- event_count = MAXIMUM_WAIT_OBJECTS;
- events[MAXIMUM_WAIT_OBJECTS - 1] = next_thread;
- } else {
- std::copy(&events_[0], &events_[count_], &events[1]);
- event_count = count_ + 1;
- }
-
- DWORD wait = ::WaitForMultipleObjects(event_count, &events[0], FALSE,
- INFINITE);
- if (wait >= WAIT_OBJECT_0 && wait < (WAIT_OBJECT_0 + event_count)) {
- wait -= WAIT_OBJECT_0;
- if (wait == 0) {
- // The stop event was signaled. Check if it was signaled by a
- // sub thread. In case our sub thread had to spin another thread (and
- // so on), we must wait for ours to exit before we can check the
- // propagated event offset.
- if (next_thread) {
- base::PlatformThread::Join(next_thread);
- next_thread = NULL;
- }
- if (thread_signaled_event != -1)
- *signaled_event_ = thread_signaled_event + (MAXIMUM_WAIT_OBJECTS - 2);
- } else if (events[wait] == next_thread) {
- NOTREACHED();
- } else {
- *signaled_event_ = static_cast<int>(wait);
- SetEvent(stop_);
- }
- } else {
- NOTREACHED();
- }
-
- if (next_thread)
- base::PlatformThread::Join(next_thread);
- }
-
- private:
- HANDLE stop_;
- HANDLE* events_;
- size_t count_;
- int* signaled_event_;
- DISALLOW_COPY_AND_ASSIGN(ExtraWaitThread);
-};
-} // end namespace
-
-// static
-int CrossProcessNotification::WaitMultiple(const Notifications& notifications,
- size_t wait_offset) {
- DCHECK_LT(wait_offset, notifications.size());
-
- for (size_t i = 0; i < notifications.size(); ++i) {
- DCHECK(notifications[i]->IsValid());
- }
-
- // TODO(tommi): Should we wait in an alertable state so that we can be
- // canceled via an APC?
- scoped_array<HANDLE> handles(new HANDLE[notifications.size()]);
-
- // Because of the way WaitForMultipleObjects works, we do a little trick here.
- // When multiple events are signaled, WaitForMultipleObjects will return the
- // index of the first signaled item (lowest). This means that if we always
- // pass the array the same way to WaitForMultipleObjects, the objects that
- // come first, have higher priority. In times of heavy load, this will cause
- // elements at the back to become DOS-ed.
- // So, we store the location of the item that was last signaled. Then we split
- // up the array and move everything higher than the last signaled index to the
- // front and the rest to the back (meaning that the last signaled item will
- // become the last element in the list).
- // Assuming equally busy events, this approach distributes the priority
- // evenly.
-
- size_t index = 0;
- for (size_t i = wait_offset; i < notifications.size(); ++i)
- handles[index++] = notifications[i]->other_;
-
- for (size_t i = 0; i < wait_offset; ++i)
- handles[index++] = notifications[i]->other_;
- DCHECK_EQ(index, notifications.size());
-
- DWORD wait = WAIT_FAILED;
- bool wait_failed = false;
- if (notifications.size() <= MAXIMUM_WAIT_OBJECTS) {
- wait = ::WaitForMultipleObjects(notifications.size(), &handles[0], FALSE,
- INFINITE);
- wait_failed = wait < WAIT_OBJECT_0 ||
- wait >= (WAIT_OBJECT_0 + MAXIMUM_WAIT_OBJECTS);
- } else {
- // Used to stop the other wait threads when an event has been signaled.
- base::win::ScopedHandle stop(::CreateEvent(NULL, TRUE, FALSE, NULL));
-
- // Create the first thread and pass a pointer to all handles >63
- // to the thread + 'stop'. Then implement the thread so that it checks
- // if the number of handles is > 63. If so, spawns a new thread and
- // passes >62 handles to that thread and waits for the 62 handles + stop +
- // next thread. etc etc.
-
- // Create a list of threads so that each thread waits on at most 62 events
- // including one event for when a child thread signals completion and one
- // event for when all of the threads must be stopped (due to some event
- // being signaled).
-
- int thread_signaled_event = -1;
- ExtraWaitThread wait_thread(stop, &handles[MAXIMUM_WAIT_OBJECTS - 1],
- notifications.size() - (MAXIMUM_WAIT_OBJECTS - 1),
- &thread_signaled_event);
- base::PlatformThreadHandle thread;
- base::PlatformThread::Create(0, &wait_thread, &thread);
- HANDLE events[MAXIMUM_WAIT_OBJECTS];
- std::copy(&handles[0], &handles[MAXIMUM_WAIT_OBJECTS - 1], &events[0]);
- events[MAXIMUM_WAIT_OBJECTS - 1] = thread;
- wait = ::WaitForMultipleObjects(MAXIMUM_WAIT_OBJECTS, &events[0], FALSE,
- INFINITE);
- wait_failed = wait < WAIT_OBJECT_0 ||
- wait >= (WAIT_OBJECT_0 + MAXIMUM_WAIT_OBJECTS);
- if (wait == WAIT_OBJECT_0 + (MAXIMUM_WAIT_OBJECTS - 1)) {
- if (thread_signaled_event < 0) {
- wait_failed = true;
- NOTREACHED();
- } else {
- wait = WAIT_OBJECT_0 + (MAXIMUM_WAIT_OBJECTS - 2) +
- thread_signaled_event;
- }
- } else {
- ::SetEvent(stop);
- }
- base::PlatformThread::Join(thread);
- }
-
- int ret = -1;
- if (!wait_failed) {
- // Subtract to be politically correct (WAIT_OBJECT_0 is actually 0).
- wait -= WAIT_OBJECT_0;
- BOOL ok = ::ResetEvent(handles[wait]);
- CHECK(ok);
- ret = (wait + wait_offset) % notifications.size();
- DCHECK_EQ(handles[wait], notifications[ret]->other_.Get());
- } else {
- NOTREACHED();
- }
-
- CHECK_NE(ret, -1);
- return ret;
-}
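A minimal Win32-only sketch (hypothetical EventPair type, not part of the diff) of the two manual-reset events the implementation above is built on: each side sets its own event to grant access, and waits on, then resets, the peer's event.

#include <windows.h>

struct EventPair {
  HANDLE mine;   // We set this to grant the peer access.
  HANDLE other;  // The peer sets this to grant us access.
};

bool InitializeEventPair(EventPair* a, EventPair* b) {
  HANDLE event_a = ::CreateEvent(NULL, TRUE, FALSE, NULL);  // Manual reset.
  HANDLE event_b = ::CreateEvent(NULL, TRUE, FALSE, NULL);
  if (!event_a || !event_b) {
    if (event_a) ::CloseHandle(event_a);
    if (event_b) ::CloseHandle(event_b);
    return false;
  }
  a->mine = event_a; a->other = event_b;
  b->mine = event_b; b->other = event_a;
  return true;
}

void SignalPeer(EventPair* p) { ::SetEvent(p->mine); }

void WaitForPeer(EventPair* p) {
  ::WaitForSingleObject(p->other, INFINITE);
  ::ResetEvent(p->other);  // Manual-reset events must be cleared explicitly.
}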
diff --git a/src/media/audio/fake_audio_input_stream.cc b/src/media/audio/fake_audio_input_stream.cc
deleted file mode 100644
index d2b1ce9..0000000
--- a/src/media/audio/fake_audio_input_stream.cc
+++ /dev/null
@@ -1,170 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "media/audio/fake_audio_input_stream.h"
-
-#include "base/bind.h"
-#include "base/lazy_instance.h"
-#include "media/audio/audio_manager_base.h"
-
-using base::Time;
-using base::TimeDelta;
-
-namespace media {
-
-namespace {
-
-// These values are based on experiments for local-to-local
-// PeerConnection to demonstrate audio/video synchronization.
-const int kBeepDurationMilliseconds = 20;
-const int kBeepFrequency = 400;
-
-struct BeepContext {
- BeepContext() : beep_once(false) {}
- base::Lock beep_lock;
- bool beep_once;
-};
-
-static base::LazyInstance<BeepContext> g_beep_context =
- LAZY_INSTANCE_INITIALIZER;
-
-} // namespace
-
-AudioInputStream* FakeAudioInputStream::MakeFakeStream(
- AudioManagerBase* manager,
- const AudioParameters& params) {
- return new FakeAudioInputStream(manager, params);
-}
-
-FakeAudioInputStream::FakeAudioInputStream(AudioManagerBase* manager,
- const AudioParameters& params)
- : audio_manager_(manager),
- callback_(NULL),
- buffer_size_((params.channels() * params.bits_per_sample() *
- params.frames_per_buffer()) / 8),
- params_(params),
- thread_("FakeAudioRecordingThread"),
- callback_interval_(base::TimeDelta::FromMilliseconds(
- (params.frames_per_buffer() * 1000) / params.sample_rate())),
- beep_duration_in_buffers_(
- kBeepDurationMilliseconds * params.sample_rate() /
- params.frames_per_buffer() / 1000),
- beep_generated_in_buffers_(0),
- beep_period_in_frames_(params.sample_rate() / kBeepFrequency),
- frames_elapsed_(0) {
-}
-
-FakeAudioInputStream::~FakeAudioInputStream() {}
-
-bool FakeAudioInputStream::Open() {
- buffer_.reset(new uint8[buffer_size_]);
- memset(buffer_.get(), 0, buffer_size_);
- return true;
-}
-
-void FakeAudioInputStream::Start(AudioInputCallback* callback) {
- DCHECK(!thread_.IsRunning());
- callback_ = callback;
- last_callback_time_ = Time::Now();
- thread_.Start();
- thread_.message_loop()->PostDelayedTask(
- FROM_HERE,
- base::Bind(&FakeAudioInputStream::DoCallback, base::Unretained(this)),
- callback_interval_);
-}
-
-void FakeAudioInputStream::DoCallback() {
- DCHECK(callback_);
-
- memset(buffer_.get(), 0, buffer_size_);
-
- bool should_beep = false;
- {
- BeepContext* beep_context = g_beep_context.Pointer();
- base::AutoLock auto_lock(beep_context->beep_lock);
- should_beep = beep_context->beep_once;
- beep_context->beep_once = false;
- }
-
-  // If this object was instructed to generate a beep, or has already started
-  // generating one, fill the buffer with a square wave.
- if (should_beep || beep_generated_in_buffers_) {
- // Compute the number of frames to output high value. Then compute the
- // number of bytes based on channels and bits per channel.
- int high_frames = beep_period_in_frames_ / 2;
- int high_bytes = high_frames * params_.bits_per_sample() *
- params_.channels() / 8;
-
- // Separate high and low with the same number of bytes to generate a
- // square wave.
- int position = 0;
- while (position + high_bytes <= buffer_size_) {
- // Write high values first.
- memset(buffer_.get() + position, 128, high_bytes);
-
- // Then leave low values in the buffer with |high_bytes|.
- position += high_bytes * 2;
- }
-
- ++beep_generated_in_buffers_;
- if (beep_generated_in_buffers_ >= beep_duration_in_buffers_)
- beep_generated_in_buffers_ = 0;
- }
-
- callback_->OnData(this, buffer_.get(), buffer_size_, buffer_size_, 1.0);
- frames_elapsed_ += params_.frames_per_buffer();
-
- Time now = Time::Now();
- base::TimeDelta next_callback_time =
- last_callback_time_ + callback_interval_ * 2 - now;
-
- // If we are falling behind, try to catch up as much as we can in the next
- // callback.
- if (next_callback_time < base::TimeDelta())
- next_callback_time = base::TimeDelta();
-
- last_callback_time_ = now;
- thread_.message_loop()->PostDelayedTask(
- FROM_HERE,
- base::Bind(&FakeAudioInputStream::DoCallback, base::Unretained(this)),
- next_callback_time);
-}
-
-void FakeAudioInputStream::Stop() {
- thread_.Stop();
-}
-
-void FakeAudioInputStream::Close() {
- if (callback_) {
- callback_->OnClose(this);
- callback_ = NULL;
- }
- audio_manager_->ReleaseInputStream(this);
-}
-
-double FakeAudioInputStream::GetMaxVolume() {
- return 1.0;
-}
-
-void FakeAudioInputStream::SetVolume(double volume) {
-}
-
-double FakeAudioInputStream::GetVolume() {
- return 1.0;
-}
-
-void FakeAudioInputStream::SetAutomaticGainControl(bool enabled) {}
-
-bool FakeAudioInputStream::GetAutomaticGainControl() {
- return true;
-}
-
-// static
-void FakeAudioInputStream::BeepOnce() {
- BeepContext* beep_context = g_beep_context.Pointer();
- base::AutoLock auto_lock(beep_context->beep_lock);
- beep_context->beep_once = true;
-}
-
-} // namespace media
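A sketch of the square-wave fill DoCallback() performs above (FillSquareWave() is a hypothetical helper): for each beep period, write half a period of bytes set to 128 and leave the other half at zero, matching the deleted code.

#include <cstddef>
#include <cstring>
#include <vector>

void FillSquareWave(std::vector<unsigned char>* buffer, int sample_rate,
                    int beep_frequency, int channels, int bits_per_sample) {
  int period_frames = sample_rate / beep_frequency;  // e.g. 8000 / 400 = 20.
  int high_bytes = (period_frames / 2) * channels * bits_per_sample / 8;
  if (high_bytes <= 0)
    return;
  // Write |high_bytes| of "high" samples, skip |high_bytes| of silence, repeat.
  for (size_t pos = 0; pos + high_bytes <= buffer->size(); pos += 2 * high_bytes)
    std::memset(&(*buffer)[0] + pos, 128, high_bytes);
}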
diff --git a/src/media/audio/fake_audio_input_stream.h b/src/media/audio/fake_audio_input_stream.h
deleted file mode 100644
index c1e1ba5..0000000
--- a/src/media/audio/fake_audio_input_stream.h
+++ /dev/null
@@ -1,76 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-//
-// A fake implementation of AudioInputStream, useful for testing purpose.
-
-#ifndef MEDIA_AUDIO_FAKE_AUDIO_INPUT_STREAM_H_
-#define MEDIA_AUDIO_FAKE_AUDIO_INPUT_STREAM_H_
-
-#include <vector>
-
-#include "base/memory/scoped_ptr.h"
-#include "base/synchronization/lock.h"
-#include "base/threading/thread.h"
-#include "base/time.h"
-#include "media/audio/audio_io.h"
-#include "media/audio/audio_parameters.h"
-
-namespace media {
-
-class AudioManagerBase;
-
-class MEDIA_EXPORT FakeAudioInputStream
- : public AudioInputStream {
- public:
- static AudioInputStream* MakeFakeStream(AudioManagerBase* manager,
- const AudioParameters& params);
-
- virtual bool Open() OVERRIDE;
- virtual void Start(AudioInputCallback* callback) OVERRIDE;
- virtual void Stop() OVERRIDE;
- virtual void Close() OVERRIDE;
- virtual double GetMaxVolume() OVERRIDE;
- virtual void SetVolume(double volume) OVERRIDE;
- virtual double GetVolume() OVERRIDE;
- virtual void SetAutomaticGainControl(bool enabled) OVERRIDE;
- virtual bool GetAutomaticGainControl() OVERRIDE;
-
- // Generate one beep sound. This method is called by
- // FakeVideoCaptureDevice to test audio/video synchronization.
- // This is a static method because FakeVideoCaptureDevice is
- // disconnected from an audio device. This means only one instance of
- // this class gets to respond, which is okay because we assume there's
- // only one stream for this testing purpose.
- // TODO(hclam): Make this non-static. To do this we'll need to fix
- // crbug.com/159053 such that video capture device is aware of audio
- // input stream.
- static void BeepOnce();
-
- private:
- FakeAudioInputStream(AudioManagerBase* manager,
- const AudioParameters& params);
-
- virtual ~FakeAudioInputStream();
-
- void DoCallback();
-
- AudioManagerBase* audio_manager_;
- AudioInputCallback* callback_;
- scoped_array<uint8> buffer_;
- int buffer_size_;
- AudioParameters params_;
- base::Thread thread_;
- base::Time last_callback_time_;
- base::TimeDelta callback_interval_;
- int beep_duration_in_buffers_;
- int beep_generated_in_buffers_;
- int beep_period_in_frames_;
- int frames_elapsed_;
-
- DISALLOW_COPY_AND_ASSIGN(FakeAudioInputStream);
-};
-
-} // namespace media
-
-#endif // MEDIA_AUDIO_FAKE_AUDIO_INPUT_STREAM_H_
diff --git a/src/media/audio/fake_audio_output_stream.cc b/src/media/audio/fake_audio_output_stream.cc
deleted file mode 100644
index c21026d..0000000
--- a/src/media/audio/fake_audio_output_stream.cc
+++ /dev/null
@@ -1,82 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "media/audio/fake_audio_output_stream.h"
-
-#include "base/bind.h"
-#include "base/bind_helpers.h"
-#include "base/logging.h"
-#include "base/message_loop.h"
-#include "media/audio/audio_manager_base.h"
-
-namespace media {
-
-// static
-AudioOutputStream* FakeAudioOutputStream::MakeFakeStream(
- AudioManagerBase* manager, const AudioParameters& params) {
- return new FakeAudioOutputStream(manager, params);
-}
-
-FakeAudioOutputStream::FakeAudioOutputStream(AudioManagerBase* manager,
- const AudioParameters& params)
- : audio_manager_(manager),
- callback_(NULL),
- audio_bus_(AudioBus::Create(params)),
- frames_per_millisecond_(
- params.sample_rate() / static_cast<float>(
- base::Time::kMillisecondsPerSecond)) {
-}
-
-FakeAudioOutputStream::~FakeAudioOutputStream() {
- DCHECK(!callback_);
-}
-
-bool FakeAudioOutputStream::Open() {
- DCHECK(audio_manager_->GetMessageLoop()->BelongsToCurrentThread());
- return true;
-}
-
-void FakeAudioOutputStream::Start(AudioSourceCallback* callback) {
- DCHECK(audio_manager_->GetMessageLoop()->BelongsToCurrentThread());
- callback_ = callback;
- on_more_data_cb_.Reset(base::Bind(
- &FakeAudioOutputStream::OnMoreDataTask, base::Unretained(this)));
- audio_manager_->GetMessageLoop()->PostTask(
- FROM_HERE, on_more_data_cb_.callback());
-}
-
-void FakeAudioOutputStream::Stop() {
- DCHECK(audio_manager_->GetMessageLoop()->BelongsToCurrentThread());
- callback_ = NULL;
- on_more_data_cb_.Cancel();
-}
-
-void FakeAudioOutputStream::Close() {
- DCHECK(!callback_);
- DCHECK(audio_manager_->GetMessageLoop()->BelongsToCurrentThread());
- audio_manager_->ReleaseOutputStream(this);
-}
-
-void FakeAudioOutputStream::SetVolume(double volume) {};
-
-void FakeAudioOutputStream::GetVolume(double* volume) {
- *volume = 0;
-};
-
-void FakeAudioOutputStream::OnMoreDataTask() {
- DCHECK(audio_manager_->GetMessageLoop()->BelongsToCurrentThread());
- DCHECK(callback_);
-
- audio_bus_->Zero();
- int frames_received = callback_->OnMoreData(
- audio_bus_.get(), AudioBuffersState());
-
- // Calculate our sleep duration for simulated playback. Sleep for at least
- // one millisecond so we don't spin the CPU.
- audio_manager_->GetMessageLoop()->PostDelayedTask(
- FROM_HERE, on_more_data_cb_.callback(), base::TimeDelta::FromMilliseconds(
- std::max(1.0f, frames_received / frames_per_millisecond_)));
-}
-
-} // namespace media
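The delay computed at the end of OnMoreDataTask() above, factored out as a small sketch (hypothetical helper name): schedule the next fake callback after the real-time duration of the frames just "played", but never sooner than 1 ms.

#include <algorithm>

int NextCallbackDelayMs(int frames_received, int sample_rate) {
  float frames_per_millisecond = sample_rate / 1000.0f;
  return static_cast<int>(
      std::max(1.0f, frames_received / frames_per_millisecond));
}
// Example: 128 frames at an 8000 Hz sample rate -> a 16 ms delay, matching the
// 128-frame / 8 kHz parameters used by the deleted unit test below.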
diff --git a/src/media/audio/fake_audio_output_stream.h b/src/media/audio/fake_audio_output_stream.h
deleted file mode 100644
index d188b9f..0000000
--- a/src/media/audio/fake_audio_output_stream.h
+++ /dev/null
@@ -1,55 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef MEDIA_AUDIO_FAKE_AUDIO_OUTPUT_STREAM_H_
-#define MEDIA_AUDIO_FAKE_AUDIO_OUTPUT_STREAM_H_
-
-#include "base/cancelable_callback.h"
-#include "base/memory/scoped_ptr.h"
-#include "media/audio/audio_io.h"
-#include "media/audio/audio_parameters.h"
-
-namespace media {
-
-class AudioManagerBase;
-
-// A fake implementation of AudioOutputStream. Used for testing and when a real
-// audio output device is unavailable or refusing output (e.g. remote desktop).
-class MEDIA_EXPORT FakeAudioOutputStream : public AudioOutputStream {
- public:
- static AudioOutputStream* MakeFakeStream(AudioManagerBase* manager,
- const AudioParameters& params);
-
- // AudioOutputStream implementation.
- virtual bool Open() OVERRIDE;
- virtual void Start(AudioSourceCallback* callback) OVERRIDE;
- virtual void Stop() OVERRIDE;
- virtual void SetVolume(double volume) OVERRIDE;
- virtual void GetVolume(double* volume) OVERRIDE;
- virtual void Close() OVERRIDE;
-
- private:
- FakeAudioOutputStream(AudioManagerBase* manager,
- const AudioParameters& params);
- virtual ~FakeAudioOutputStream();
-
- // Task that regularly calls |callback_->OnMoreData()| according to the
- // playback rate as determined by the audio parameters given during
- // construction. Runs on AudioManager's message loop.
- void OnMoreDataTask();
-
- AudioManagerBase* audio_manager_;
- AudioSourceCallback* callback_;
- scoped_ptr<AudioBus> audio_bus_;
- float frames_per_millisecond_;
-
- // Used to post delayed tasks to the AudioThread that we can cancel.
- base::CancelableClosure on_more_data_cb_;
-
- DISALLOW_COPY_AND_ASSIGN(FakeAudioOutputStream);
-};
-
-} // namespace media
-
-#endif // MEDIA_AUDIO_FAKE_AUDIO_OUTPUT_STREAM_H_
diff --git a/src/media/audio/fake_audio_output_stream_unittest.cc b/src/media/audio/fake_audio_output_stream_unittest.cc
deleted file mode 100644
index 6838e3f..0000000
--- a/src/media/audio/fake_audio_output_stream_unittest.cc
+++ /dev/null
@@ -1,140 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/bind.h"
-#include "base/message_loop.h"
-#include "base/synchronization/waitable_event.h"
-#include "base/time.h"
-#include "media/audio/fake_audio_output_stream.h"
-#include "media/audio/audio_manager.h"
-#include "media/audio/simple_sources.h"
-#include "testing/gtest/include/gtest/gtest.h"
-
-namespace media {
-
-class FakeAudioOutputStreamTest : public testing::Test {
- public:
- FakeAudioOutputStreamTest()
- : audio_manager_(AudioManager::Create()),
- params_(
- AudioParameters::AUDIO_FAKE, CHANNEL_LAYOUT_STEREO, 8000, 8, 128),
- source_(params_.channels(), 200.0, params_.sample_rate()),
- done_(false, false) {
- stream_ = audio_manager_->MakeAudioOutputStream(AudioParameters(params_));
- CHECK(stream_);
-
- time_between_callbacks_ = base::TimeDelta::FromMilliseconds(
- params_.frames_per_buffer() * base::Time::kMillisecondsPerSecond /
- static_cast<float>(params_.sample_rate()));
- }
-
- virtual ~FakeAudioOutputStreamTest() {}
-
- void RunOnAudioThread() {
- ASSERT_TRUE(audio_manager_->GetMessageLoop()->BelongsToCurrentThread());
- ASSERT_TRUE(stream_->Open());
- stream_->Start(&source_);
- }
-
- void RunOnceOnAudioThread() {
- ASSERT_TRUE(audio_manager_->GetMessageLoop()->BelongsToCurrentThread());
- RunOnAudioThread();
- // Start() should immediately post a task to run the source callback, so we
- // should end up with only a single callback being run.
- audio_manager_->GetMessageLoop()->PostTask(FROM_HERE, base::Bind(
- &FakeAudioOutputStreamTest::EndTest, base::Unretained(this), 1));
- }
-
- void StopStartOnAudioThread() {
- ASSERT_TRUE(audio_manager_->GetMessageLoop()->BelongsToCurrentThread());
- stream_->Stop();
- stream_->Start(&source_);
- }
-
- void TimeCallbacksOnAudioThread(int callbacks) {
- ASSERT_TRUE(audio_manager_->GetMessageLoop()->BelongsToCurrentThread());
-
- if (source_.callbacks() == 0) {
- RunOnAudioThread();
- start_time_ = base::Time::Now();
- }
-
- // Keep going until we've seen the requested number of callbacks.
- if (source_.callbacks() < callbacks) {
- audio_manager_->GetMessageLoop()->PostDelayedTask(FROM_HERE, base::Bind(
- &FakeAudioOutputStreamTest::TimeCallbacksOnAudioThread,
- base::Unretained(this), callbacks), time_between_callbacks_);
- } else {
- audio_manager_->GetMessageLoop()->PostTask(FROM_HERE, base::Bind(
- &FakeAudioOutputStreamTest::EndTest, base::Unretained(this),
- callbacks));
- }
- }
-
- void EndTest(int callbacks) {
- ASSERT_TRUE(audio_manager_->GetMessageLoop()->BelongsToCurrentThread());
- stream_->Stop();
- stream_->Close();
- EXPECT_EQ(callbacks, source_.callbacks());
- EXPECT_EQ(0, source_.errors());
- done_.Signal();
- }
-
- protected:
- scoped_ptr<AudioManager> audio_manager_;
- AudioParameters params_;
- AudioOutputStream* stream_;
- SineWaveAudioSource source_;
- base::WaitableEvent done_;
- base::Time start_time_;
- base::TimeDelta time_between_callbacks_;
-
- private:
- DISALLOW_COPY_AND_ASSIGN(FakeAudioOutputStreamTest);
-};
-
-// Ensure the fake audio stream runs on the audio thread and fires
-// callbacks to the AudioSourceCallback.
-TEST_F(FakeAudioOutputStreamTest, FakeStreamBasicCallback) {
- audio_manager_->GetMessageLoop()->PostTask(FROM_HERE, base::Bind(
- &FakeAudioOutputStreamTest::RunOnceOnAudioThread,
- base::Unretained(this)));
- done_.Wait();
-}
-
-// Ensure the time between callbacks is sane.
-TEST_F(FakeAudioOutputStreamTest, TimeBetweenCallbacks) {
- static const int kTestCallbacks = 5;
-
- audio_manager_->GetMessageLoop()->PostTask(FROM_HERE, base::Bind(
- &FakeAudioOutputStreamTest::TimeCallbacksOnAudioThread,
- base::Unretained(this), kTestCallbacks));
-
- // Let the loop run for a second or two then issue Stop() / Start().
- audio_manager_->GetMessageLoop()->PostDelayedTask(FROM_HERE, base::Bind(
- &FakeAudioOutputStreamTest::StopStartOnAudioThread,
- base::Unretained(this)), time_between_callbacks_);
-
- done_.Wait();
-
- base::TimeDelta elapsed = base::Time::Now() - start_time_;
-
- // There are only (kTestCallbacks - 1) intervals between kTestCallbacks.
- float actual_time_between_callbacks_ms =
- elapsed.InMillisecondsF() / (kTestCallbacks - 1);
- float expected_time_between_callbacks_ms =
- time_between_callbacks_.InMillisecondsF();
-
- // Ensure callback time is no faster than the expected time between callbacks.
- EXPECT_GE(actual_time_between_callbacks_ms,
- expected_time_between_callbacks_ms);
-
- // Softly check if the callback time is no slower than twice the expected time
- // between callbacks. Since this test runs on the bots we can't be too strict
- // with the bounds.
- if (actual_time_between_callbacks_ms > 2 * expected_time_between_callbacks_ms)
- LOG(ERROR) << "Time between fake audio callbacks is too large!";
-}
-
-} // namespace media
diff --git a/src/media/audio/ios/audio_manager_ios.h b/src/media/audio/ios/audio_manager_ios.h
deleted file mode 100644
index 55cbe6e..0000000
--- a/src/media/audio/ios/audio_manager_ios.h
+++ /dev/null
@@ -1,55 +0,0 @@
-// Copyright 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef MEDIA_AUDIO_IOS_AUDIO_MANAGER_IOS_H_
-#define MEDIA_AUDIO_IOS_AUDIO_MANAGER_IOS_H_
-
-#include "base/basictypes.h"
-#include "media/audio/audio_manager_base.h"
-
-namespace media {
-
-class PCMQueueInAudioInputStream;
-class PCMQueueOutAudioOutputStream;
-
-// iOS implementation of the AudioManager singleton. Supports only audio input.
-class MEDIA_EXPORT AudioManagerIOS : public AudioManagerBase {
- public:
- AudioManagerIOS();
-
- // Implementation of AudioManager.
- virtual bool HasAudioOutputDevices() OVERRIDE;
- virtual bool HasAudioInputDevices() OVERRIDE;
- virtual AudioOutputStream* MakeAudioOutputStream(
- const AudioParameters& params) OVERRIDE;
- virtual AudioInputStream* MakeAudioInputStream(
- const AudioParameters& params, const std::string& device_id) OVERRIDE;
-
- // Implementation of AudioManagerBase.
- virtual AudioOutputStream* MakeLinearOutputStream(
- const AudioParameters& params) OVERRIDE;
- virtual AudioOutputStream* MakeLowLatencyOutputStream(
- const AudioParameters& params) OVERRIDE;
- virtual AudioInputStream* MakeLinearInputStream(
- const AudioParameters& params, const std::string& device_id) OVERRIDE;
- virtual AudioInputStream* MakeLowLatencyInputStream(
- const AudioParameters& params, const std::string& device_id) OVERRIDE;
- virtual void ReleaseOutputStream(AudioOutputStream* stream) OVERRIDE;
- virtual void ReleaseInputStream(AudioInputStream* stream) OVERRIDE;
-
- protected:
- virtual ~AudioManagerIOS();
-
- private:
- // Initializes the audio session if necessary. Safe to call multiple times.
- // Returns a bool indicating whether the audio session has been successfully
- // initialized (either in the current call or in a previous call).
- bool InitAudioSession();
-
- DISALLOW_COPY_AND_ASSIGN(AudioManagerIOS);
-};
-
-} // namespace media
-
-#endif // MEDIA_AUDIO_IOS_AUDIO_MANAGER_IOS_H_
diff --git a/src/media/audio/ios/audio_manager_ios.mm b/src/media/audio/ios/audio_manager_ios.mm
deleted file mode 100644
index a4ffff5..0000000
--- a/src/media/audio/ios/audio_manager_ios.mm
+++ /dev/null
@@ -1,127 +0,0 @@
-// Copyright 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "media/audio/ios/audio_manager_ios.h"
-
-#import <AudioToolbox/AudioToolbox.h>
-#import <AVFoundation/AVFoundation.h>
-
-#include "base/sys_info.h"
-#include "media/audio/fake_audio_input_stream.h"
-#include "media/audio/mac/audio_input_mac.h"
-#include "media/base/limits.h"
-
-namespace media {
-
-enum { kMaxInputChannels = 2 };
-
-// Initializes the audio session, returning a bool indicating whether
-// initialization was successful. Should only be called once.
-static bool InitAudioSessionInternal() {
- OSStatus error = AudioSessionInitialize(NULL, NULL, NULL, NULL);
- DCHECK(error != kAudioSessionAlreadyInitialized);
- AVAudioSession* audioSession = [AVAudioSession sharedInstance];
- BOOL result = [audioSession setCategory:AVAudioSessionCategoryPlayAndRecord
- error:nil];
- DCHECK(result);
- UInt32 allowMixing = true;
- AudioSessionSetProperty(kAudioSessionProperty_OverrideCategoryMixWithOthers,
- sizeof(allowMixing), &allowMixing);
- return error == kAudioSessionNoError;
-}
-
-AudioManagerIOS::AudioManagerIOS() {
-}
-
-AudioManagerIOS::~AudioManagerIOS() {
- Shutdown();
-}
-
-bool AudioManagerIOS::HasAudioOutputDevices() {
- return false;
-}
-
-bool AudioManagerIOS::HasAudioInputDevices() {
- if (!InitAudioSession())
- return false;
- // Note that the |kAudioSessionProperty_AudioInputAvailable| property is a
- // 32-bit integer, not a boolean.
- UInt32 property_size;
- OSStatus error =
- AudioSessionGetPropertySize(kAudioSessionProperty_AudioInputAvailable,
- &property_size);
- if (error != kAudioSessionNoError)
- return false;
- UInt32 audio_input_is_available = false;
- DCHECK(property_size == sizeof(audio_input_is_available));
- error = AudioSessionGetProperty(kAudioSessionProperty_AudioInputAvailable,
- &property_size,
- &audio_input_is_available);
- return error == kAudioSessionNoError ? audio_input_is_available : false;
-}
-
-AudioOutputStream* AudioManagerIOS::MakeAudioOutputStream(
- const AudioParameters& params) {
- NOTIMPLEMENTED(); // Only input is supported on iOS.
- return NULL;
-}
-
-AudioInputStream* AudioManagerIOS::MakeAudioInputStream(
- const AudioParameters& params, const std::string& device_id) {
- // Current line of iOS devices has only one audio input.
- // Ignore the device_id (unittest uses a test value in it).
- if (!params.IsValid() || (params.channels() > kMaxInputChannels))
- return NULL;
-
- if (params.format() == AudioParameters::AUDIO_FAKE)
- return FakeAudioInputStream::MakeFakeStream(this, params);
- else if (params.format() == AudioParameters::AUDIO_PCM_LINEAR)
- return new PCMQueueInAudioInputStream(this, params);
- return NULL;
-}
-
-AudioOutputStream* AudioManagerIOS::MakeLinearOutputStream(
- const AudioParameters& params) {
- NOTIMPLEMENTED(); // Only input is supported on iOS.
- return NULL;
-}
-
-AudioOutputStream* AudioManagerIOS::MakeLowLatencyOutputStream(
- const AudioParameters& params) {
- NOTIMPLEMENTED(); // Only input is supported on iOS.
- return NULL;
-}
-
-AudioInputStream* AudioManagerIOS::MakeLinearInputStream(
- const AudioParameters& params, const std::string& device_id) {
- return MakeAudioInputStream(params, device_id);
-}
-
-AudioInputStream* AudioManagerIOS::MakeLowLatencyInputStream(
- const AudioParameters& params, const std::string& device_id) {
- NOTIMPLEMENTED(); // Only linear audio input is supported on iOS.
- return MakeAudioInputStream(params, device_id);
-}
-
-// Called by the stream when it has been released by calling Close().
-void AudioManagerIOS::ReleaseOutputStream(AudioOutputStream* stream) {
- NOTIMPLEMENTED(); // Only input is supported on iOS.
-}
-
-// Called by the stream when it has been released by calling Close().
-void AudioManagerIOS::ReleaseInputStream(AudioInputStream* stream) {
- delete stream;
-}
-
-bool AudioManagerIOS::InitAudioSession() {
- static const bool kSessionInitialized = InitAudioSessionInternal();
- return kSessionInitialized;
-}
-
-// static
-AudioManager* CreateAudioManager() {
- return new AudioManagerIOS();
-}
-
-} // namespace media
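InitAudioSession() above relies on a function-local static to run the session setup exactly once and cache the result. A generic sketch of that call-once idiom follows (names hypothetical; the static initialization is only guaranteed thread-safe from C++11 onward).

bool DoOneTimeSetup() {
  // Stand-in for the real one-shot work (e.g. the session initialization above).
  return true;
}

bool EnsureInitialized() {
  // The initializer runs on the first call only; later calls return the
  // cached result.
  static const bool initialized = DoOneTimeSetup();
  return initialized;
}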
diff --git a/src/media/audio/ios/audio_manager_ios_unittest.cc b/src/media/audio/ios/audio_manager_ios_unittest.cc
deleted file mode 100644
index e8013cc..0000000
--- a/src/media/audio/ios/audio_manager_ios_unittest.cc
+++ /dev/null
@@ -1,33 +0,0 @@
-// Copyright 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/basictypes.h"
-#include "media/audio/audio_io.h"
-#include "media/audio/audio_manager.h"
-#include "testing/gtest/include/gtest/gtest.h"
-
-using namespace media;
-
-// Test that input is supported and output is not.
-TEST(IOSAudioTest, AudioSupport) {
- AudioManager* audio_manager = AudioManager::Create();
- ASSERT_TRUE(NULL != audio_manager);
- ASSERT_FALSE(audio_manager->HasAudioOutputDevices());
- ASSERT_TRUE(audio_manager->HasAudioInputDevices());
-}
-
-// Test that input stream can be opened and closed.
-TEST(IOSAudioTest, InputStreamOpenAndClose) {
- AudioManager* audio_manager = AudioManager::Create();
- ASSERT_TRUE(NULL != audio_manager);
- if (!audio_manager->HasAudioInputDevices())
- return;
- AudioInputStream* ias = audio_manager->MakeAudioInputStream(
- AudioParameters(AudioParameters::AUDIO_PCM_LINEAR, CHANNEL_LAYOUT_STEREO,
- 8000, 16, 1024),
- std::string("test_device"));
- ASSERT_TRUE(NULL != ias);
- EXPECT_TRUE(ias->Open());
- ias->Close();
-}
diff --git a/src/media/audio/linux/alsa_input.cc b/src/media/audio/linux/alsa_input.cc
deleted file mode 100644
index ea199cb..0000000
--- a/src/media/audio/linux/alsa_input.cc
+++ /dev/null
@@ -1,345 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "media/audio/linux/alsa_input.h"
-
-#include "base/basictypes.h"
-#include "base/bind.h"
-#include "base/logging.h"
-#include "base/message_loop.h"
-#include "base/time.h"
-#include "media/audio/audio_manager.h"
-#include "media/audio/linux/alsa_output.h"
-#include "media/audio/linux/alsa_util.h"
-#include "media/audio/linux/alsa_wrapper.h"
-#include "media/audio/linux/audio_manager_linux.h"
-
-namespace media {
-
-static const int kNumPacketsInRingBuffer = 3;
-
-static const char kDefaultDevice1[] = "default";
-static const char kDefaultDevice2[] = "plug:default";
-
-const char* AlsaPcmInputStream::kAutoSelectDevice = "";
-
-AlsaPcmInputStream::AlsaPcmInputStream(AudioManagerLinux* audio_manager,
- const std::string& device_name,
- const AudioParameters& params,
- AlsaWrapper* wrapper)
- : audio_manager_(audio_manager),
- device_name_(device_name),
- params_(params),
- bytes_per_buffer_(params.frames_per_buffer() *
- (params.channels() * params.bits_per_sample()) / 8),
- wrapper_(wrapper),
- buffer_duration_ms_(
- (params.frames_per_buffer() * base::Time::kMillisecondsPerSecond) /
- params.sample_rate()),
- callback_(NULL),
- device_handle_(NULL),
- mixer_handle_(NULL),
- mixer_element_handle_(NULL),
- ALLOW_THIS_IN_INITIALIZER_LIST(weak_factory_(this)),
- read_callback_behind_schedule_(false) {
-}
-
-AlsaPcmInputStream::~AlsaPcmInputStream() {}
-
-bool AlsaPcmInputStream::Open() {
- if (device_handle_)
- return false; // Already open.
-
- snd_pcm_format_t pcm_format = alsa_util::BitsToFormat(
- params_.bits_per_sample());
- if (pcm_format == SND_PCM_FORMAT_UNKNOWN) {
- LOG(WARNING) << "Unsupported bits per sample: "
- << params_.bits_per_sample();
- return false;
- }
-
- uint32 latency_us = buffer_duration_ms_ * kNumPacketsInRingBuffer *
- base::Time::kMicrosecondsPerMillisecond;
-
- // Use the same minimum required latency as output.
- latency_us = std::max(latency_us, AlsaPcmOutputStream::kMinLatencyMicros);
-
- if (device_name_ == kAutoSelectDevice) {
- const char* device_names[] = { kDefaultDevice1, kDefaultDevice2 };
- for (size_t i = 0; i < arraysize(device_names); ++i) {
- device_handle_ = alsa_util::OpenCaptureDevice(
- wrapper_, device_names[i], params_.channels(),
- params_.sample_rate(), pcm_format, latency_us);
-
- if (device_handle_) {
- device_name_ = device_names[i];
- break;
- }
- }
- } else {
- device_handle_ = alsa_util::OpenCaptureDevice(wrapper_,
- device_name_.c_str(),
- params_.channels(),
- params_.sample_rate(),
- pcm_format, latency_us);
- }
-
- if (device_handle_) {
- audio_buffer_.reset(new uint8[bytes_per_buffer_]);
-
- // Open the microphone mixer.
- mixer_handle_ = alsa_util::OpenMixer(wrapper_, device_name_);
- if (mixer_handle_) {
- mixer_element_handle_ = alsa_util::LoadCaptureMixerElement(
- wrapper_, mixer_handle_);
- }
- }
-
- return device_handle_ != NULL;
-}
-
-void AlsaPcmInputStream::Start(AudioInputCallback* callback) {
- DCHECK(!callback_ && callback);
- callback_ = callback;
- int error = wrapper_->PcmPrepare(device_handle_);
- if (error < 0) {
- HandleError("PcmPrepare", error);
- } else {
- error = wrapper_->PcmStart(device_handle_);
- if (error < 0)
- HandleError("PcmStart", error);
- }
-
- if (error < 0) {
- callback_ = NULL;
- } else {
- // We start reading data half |buffer_duration_ms_| later than when the
- // buffer might have been filled, to accommodate some delays in the audio
- // driver. This could also give us a smooth read sequence going forward.
- base::TimeDelta delay = base::TimeDelta::FromMilliseconds(
- buffer_duration_ms_ + buffer_duration_ms_ / 2);
- next_read_time_ = base::Time::Now() + delay;
- MessageLoop::current()->PostDelayedTask(
- FROM_HERE,
- base::Bind(&AlsaPcmInputStream::ReadAudio, weak_factory_.GetWeakPtr()),
- delay);
-
- audio_manager_->IncreaseActiveInputStreamCount();
- }
-}
-
-bool AlsaPcmInputStream::Recover(int original_error) {
- int error = wrapper_->PcmRecover(device_handle_, original_error, 1);
- if (error < 0) {
- // Docs say snd_pcm_recover returns the original error if it is not one
- // of the recoverable ones, so this log message will probably contain the
- // same error twice.
- LOG(WARNING) << "Unable to recover from \""
- << wrapper_->StrError(original_error) << "\": "
- << wrapper_->StrError(error);
- return false;
- }
-
- if (original_error == -EPIPE) { // Buffer underrun/overrun.
- // For capture streams we have to repeat the explicit start() to get
- // data flowing again.
- error = wrapper_->PcmStart(device_handle_);
- if (error < 0) {
- HandleError("PcmStart", error);
- return false;
- }
- }
-
- return true;
-}
-
-snd_pcm_sframes_t AlsaPcmInputStream::GetCurrentDelay() {
- snd_pcm_sframes_t delay = -1;
-
- int error = wrapper_->PcmDelay(device_handle_, &delay);
- if (error < 0)
- Recover(error);
-
- // snd_pcm_delay() may not work at the beginning of the stream. In this case,
- // return the delay of the data we know is currently in ALSA's buffer.
- if (delay < 0)
- delay = wrapper_->PcmAvailUpdate(device_handle_);
-
- return delay;
-}
-
-void AlsaPcmInputStream::ReadAudio() {
- DCHECK(callback_);
-
- snd_pcm_sframes_t frames = wrapper_->PcmAvailUpdate(device_handle_);
- if (frames < 0) { // Potentially recoverable error?
- LOG(WARNING) << "PcmAvailUpdate(): " << wrapper_->StrError(frames);
- Recover(frames);
- }
-
- if (frames < params_.frames_per_buffer()) {
- // Not enough data yet, or an error happened. In both cases, wait for a very
- // small duration before checking again.
- // Even though the read callback was behind schedule, there is no data, so
- // reset |next_read_time_|.
- if (read_callback_behind_schedule_) {
- next_read_time_ = base::Time::Now();
- read_callback_behind_schedule_ = false;
- }
-
- base::TimeDelta next_check_time = base::TimeDelta::FromMilliseconds(
- buffer_duration_ms_ / 2);
- MessageLoop::current()->PostDelayedTask(
- FROM_HERE,
- base::Bind(&AlsaPcmInputStream::ReadAudio, weak_factory_.GetWeakPtr()),
- next_check_time);
- return;
- }
-
- int num_buffers = frames / params_.frames_per_buffer();
- uint32 hardware_delay_bytes =
- static_cast<uint32>(GetCurrentDelay() * params_.GetBytesPerFrame());
- double normalized_volume = 0.0;
-
- // Update the AGC volume level once every second. Note that |volume| is
- // also updated each time SetVolume() is called through IPC by the
- // render-side AGC.
- QueryAgcVolume(&normalized_volume);
-
- while (num_buffers--) {
- int frames_read = wrapper_->PcmReadi(device_handle_, audio_buffer_.get(),
- params_.frames_per_buffer());
- if (frames_read == params_.frames_per_buffer()) {
- callback_->OnData(this, audio_buffer_.get(), bytes_per_buffer_,
- hardware_delay_bytes, normalized_volume);
- } else {
- LOG(WARNING) << "PcmReadi returning less than expected frames: "
- << frames_read << " vs. " << params_.frames_per_buffer()
- << ". Dropping this buffer.";
- }
- }
-
- next_read_time_ += base::TimeDelta::FromMilliseconds(buffer_duration_ms_);
- base::TimeDelta delay = next_read_time_ - base::Time::Now();
- if (delay < base::TimeDelta()) {
- LOG(WARNING) << "Audio read callback behind schedule by "
- << (buffer_duration_ms_ - delay.InMilliseconds())
- << " (ms).";
- // Read callback is behind schedule. Assuming there is data pending in
- // the soundcard, invoke the read callback immediately in order to catch up.
- read_callback_behind_schedule_ = true;
- delay = base::TimeDelta();
- }
-
- MessageLoop::current()->PostDelayedTask(
- FROM_HERE,
- base::Bind(&AlsaPcmInputStream::ReadAudio, weak_factory_.GetWeakPtr()),
- delay);
-}
-
-void AlsaPcmInputStream::Stop() {
- if (!device_handle_ || !callback_)
- return;
-
- // Stop is always called before Close. In case of error, this will also be
- // called when closing the input controller.
- audio_manager_->DecreaseActiveInputStreamCount();
-
- weak_factory_.InvalidateWeakPtrs(); // Cancel the next scheduled read.
- int error = wrapper_->PcmDrop(device_handle_);
- if (error < 0)
- HandleError("PcmDrop", error);
-}
-
-void AlsaPcmInputStream::Close() {
- if (device_handle_) {
- weak_factory_.InvalidateWeakPtrs(); // Cancel the next scheduled read.
- int error = alsa_util::CloseDevice(wrapper_, device_handle_);
- if (error < 0)
- HandleError("PcmClose", error);
-
- if (mixer_handle_)
- alsa_util::CloseMixer(wrapper_, mixer_handle_, device_name_);
-
- audio_buffer_.reset();
- device_handle_ = NULL;
- mixer_handle_ = NULL;
- mixer_element_handle_ = NULL;
-
- if (callback_)
- callback_->OnClose(this);
- }
-
- audio_manager_->ReleaseInputStream(this);
-}
-
-double AlsaPcmInputStream::GetMaxVolume() {
- if (!mixer_handle_ || !mixer_element_handle_) {
- DLOG(WARNING) << "GetMaxVolume is not supported for " << device_name_;
- return 0.0;
- }
-
- if (!wrapper_->MixerSelemHasCaptureVolume(mixer_element_handle_)) {
- DLOG(WARNING) << "Unsupported microphone volume for " << device_name_;
- return 0.0;
- }
-
- long min = 0;
- long max = 0;
- if (wrapper_->MixerSelemGetCaptureVolumeRange(mixer_element_handle_,
- &min,
- &max)) {
- DLOG(WARNING) << "Unsupported max microphone volume for " << device_name_;
- return 0.0;
- }
- DCHECK(min == 0);
- DCHECK(max > 0);
-
- return static_cast<double>(max);
-}
-
-void AlsaPcmInputStream::SetVolume(double volume) {
- if (!mixer_handle_ || !mixer_element_handle_) {
- DLOG(WARNING) << "SetVolume is not supported for " << device_name_;
- return;
- }
-
- int error = wrapper_->MixerSelemSetCaptureVolumeAll(
- mixer_element_handle_, static_cast<long>(volume));
- if (error < 0) {
- DLOG(WARNING) << "Unable to set volume for " << device_name_;
- }
-
- // Update the AGC volume level based on the last setting above. Note that
- // the volume-level resolution is not infinite, and it is therefore not
- // possible to assume that the volume provided as an input parameter can be
- // used directly. Instead, a new query to the audio hardware is required.
- // This method does nothing if AGC is disabled.
- UpdateAgcVolume();
-}
-
-double AlsaPcmInputStream::GetVolume() {
- if (!mixer_handle_ || !mixer_element_handle_) {
- DLOG(WARNING) << "GetVolume is not supported for " << device_name_;
- return 0.0;
- }
-
- long current_volume = 0;
- int error = wrapper_->MixerSelemGetCaptureVolume(
- mixer_element_handle_, static_cast<snd_mixer_selem_channel_id_t>(0),
- &current_volume);
- if (error < 0) {
- DLOG(WARNING) << "Unable to get volume for " << device_name_;
- return 0.0;
- }
-
- return static_cast<double>(current_volume);
-}
-
-void AlsaPcmInputStream::HandleError(const char* method, int error) {
- LOG(WARNING) << method << ": " << wrapper_->StrError(error);
- callback_->OnError(this, error);
-}
-
-} // namespace media
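The deleted ReadAudio() loop above keeps a nominal next read time, advances it by one buffer duration per read, and clamps the computed delay to zero when it falls behind schedule. Below is a minimal standalone sketch of that catch-up scheduling only, with Chromium's MessageLoop/PostDelayedTask replaced by std::this_thread::sleep_for and an artificial slow read to trigger the behind-schedule path; the file name, constants, and simulated read are illustrative, not part of the removed code.

// read_schedule_sketch.cc -- standalone illustration, not Cobalt/Chromium code.
#include <chrono>
#include <cstdio>
#include <thread>

using Clock = std::chrono::steady_clock;
using Millis = std::chrono::milliseconds;

int main() {
  const Millis kBufferDuration(10);  // One recorded buffer, as in the stream.
  // The first read is scheduled 1.5 buffer durations out to absorb driver lag.
  Millis delay = kBufferDuration + kBufferDuration / 2;
  Clock::time_point next_read_time = Clock::now() + delay;

  for (int i = 0; i < 5; ++i) {
    std::this_thread::sleep_for(delay);  // Stand-in for PostDelayedTask().
    // The real stream would call snd_pcm_readi() here; simulate one slow read.
    std::this_thread::sleep_for(Millis(i == 2 ? 30 : 1));

    next_read_time += kBufferDuration;   // Nominal time of the next read.
    delay = std::chrono::duration_cast<Millis>(next_read_time - Clock::now());
    bool behind_schedule = delay < Millis(0);
    if (behind_schedule)
      delay = Millis(0);                 // Catch up by reading again at once.
    std::printf("read %d: next read in %lld ms, behind=%d\n", i,
                static_cast<long long>(delay.count()),
                static_cast<int>(behind_schedule));
  }
  return 0;
}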
diff --git a/src/media/audio/linux/alsa_input.h b/src/media/audio/linux/alsa_input.h
deleted file mode 100644
index ae027d4..0000000
--- a/src/media/audio/linux/alsa_input.h
+++ /dev/null
@@ -1,92 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef MEDIA_AUDIO_LINUX_ALSA_INPUT_H_
-#define MEDIA_AUDIO_LINUX_ALSA_INPUT_H_
-
-#include <alsa/asoundlib.h>
-
-#include <string>
-
-#include "base/compiler_specific.h"
-#include "base/memory/scoped_ptr.h"
-#include "base/memory/weak_ptr.h"
-#include "base/time.h"
-#include "media/audio/audio_input_stream_impl.h"
-#include "media/audio/audio_io.h"
-#include "media/audio/audio_parameters.h"
-
-namespace media {
-
-class AlsaWrapper;
-class AudioManagerLinux;
-
-// Provides an input stream for audio capture based on the ALSA PCM interface.
-// This object is not thread safe and all methods should be invoked in the
-// thread that created the object.
-class AlsaPcmInputStream : public AudioInputStreamImpl {
- public:
- // Pass this to the constructor if you want to attempt auto-selection
- // of the audio recording device.
- static const char* kAutoSelectDevice;
-
- // Create a PCM input stream for the ALSA device identified by
- // |device_name|. If unsure of what to use for |device_name|, use
- // |kAutoSelectDevice|.
- AlsaPcmInputStream(AudioManagerLinux* audio_manager,
- const std::string& device_name,
- const AudioParameters& params,
- AlsaWrapper* wrapper);
-
- virtual ~AlsaPcmInputStream();
-
- // Implementation of AudioInputStream.
- virtual bool Open() OVERRIDE;
- virtual void Start(AudioInputCallback* callback) OVERRIDE;
- virtual void Stop() OVERRIDE;
- virtual void Close() OVERRIDE;
- virtual double GetMaxVolume() OVERRIDE;
- virtual void SetVolume(double volume) OVERRIDE;
- virtual double GetVolume() OVERRIDE;
-
- private:
- // Logs the error and invokes any registered callbacks.
- void HandleError(const char* method, int error);
-
- // Reads one or more buffers of audio from the device, passes on to the
- // registered callback and schedules the next read.
- void ReadAudio();
-
- // Recovers from any device errors if possible.
- bool Recover(int error);
-
- // Utility function for talking with the ALSA API.
- snd_pcm_sframes_t GetCurrentDelay();
-
- // Non-refcounted pointer back to the audio manager.
- // The AudioManager indirectly holds on to stream objects, so we don't
- // want circular references. Additionally, stream objects live on the audio
- // thread, which is owned by the audio manager and we don't want to addref
- // the manager from that thread.
- AudioManagerLinux* audio_manager_;
- std::string device_name_;
- AudioParameters params_;
- int bytes_per_buffer_;
- AlsaWrapper* wrapper_;
- int buffer_duration_ms_; // Length of each recorded buffer in milliseconds.
- AudioInputCallback* callback_; // Valid during a recording session.
- base::Time next_read_time_; // Scheduled time for the next read callback.
- snd_pcm_t* device_handle_; // Handle to the ALSA PCM recording device.
- snd_mixer_t* mixer_handle_; // Handle to the ALSA microphone mixer.
- snd_mixer_elem_t* mixer_element_handle_; // Handle to the capture element.
- base::WeakPtrFactory<AlsaPcmInputStream> weak_factory_;
- scoped_array<uint8> audio_buffer_; // Buffer used for reading audio data.
- bool read_callback_behind_schedule_;
-
- DISALLOW_COPY_AND_ASSIGN(AlsaPcmInputStream);
-};
-
-} // namespace media
-
-#endif // MEDIA_AUDIO_LINUX_ALSA_INPUT_H_
diff --git a/src/media/audio/linux/alsa_output.cc b/src/media/audio/linux/alsa_output.cc
deleted file mode 100644
index 1c822b6..0000000
--- a/src/media/audio/linux/alsa_output.cc
+++ /dev/null
@@ -1,796 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-//
-// THREAD SAFETY
-//
-// AlsaPcmOutputStream object is *not* thread-safe and should only be used
-// from the audio thread. We DCHECK on this assumption whenever we can.
-//
-// SEMANTICS OF Close()
-//
-// Close() is responsible for cleaning up any resources that were acquired after
-// a successful Open(). Close() will nullify any scheduled outstanding runnable
-// methods.
-//
-//
-// SEMANTICS OF ERROR STATES
-//
-// The object has two distinct error states: |state_| == kInError
-// and |stop_stream_|. The |stop_stream_| variable is used to indicate
-// that the playback_handle should no longer be used because of a
-// hardware/low-level event.
-//
-// When |state_| == kInError, all public API functions will fail with an error
-// (Start() will call the OnError() function on the callback immediately), or
-// no-op themselves with the exception of Close(). Even if an error state has
-// been entered, if Open() has previously returned successfully, Close() must be
-// called to cleanup the ALSA devices and release resources.
-//
-// When |stop_stream_| is set, no more commands will be made against the
-// ALSA device, and playback will effectively stop. From the client's point of
-// view, it will seem that the device has just clogged and stopped requesting
-// data.
-
-#include "media/audio/linux/alsa_output.h"
-
-#include <algorithm>
-
-#include "base/bind.h"
-#include "base/debug/trace_event.h"
-#include "base/logging.h"
-#include "base/message_loop.h"
-#include "base/stl_util.h"
-#include "base/time.h"
-#include "media/audio/audio_util.h"
-#include "media/audio/linux/alsa_util.h"
-#include "media/audio/linux/alsa_wrapper.h"
-#include "media/audio/linux/audio_manager_linux.h"
-#include "media/base/channel_mixer.h"
-#include "media/base/data_buffer.h"
-#include "media/base/seekable_buffer.h"
-
-namespace media {
-
-// Amount of time to wait if we've exhausted the data source. This is to avoid
-// busy looping.
-static const uint32 kNoDataSleepMilliseconds = 10;
-
-// Minimum interval between OnMoreData() calls. This is to avoid glitches for
-// WebAudio which needs time to generate new data.
-static const uint32 kMinIntervalBetweenOnMoreDataCallsInMs = 5;
-
-// According to the linux nanosleep manpage, nanosleep on linux can miss the
-// deadline by up to 10ms because the kernel timeslice is 10ms. This should be
-// enough to compensate for the timeslice, and any additional slowdowns.
-static const uint32 kSleepErrorMilliseconds = 10;
-
-// Set to 0 during debugging if you want error messages due to underrun
-// events or other recoverable errors.
-#if defined(NDEBUG)
-static const int kPcmRecoverIsSilent = 1;
-#else
-static const int kPcmRecoverIsSilent = 0;
-#endif
-
-// While the "default" device may support multi-channel audio, in ALSA, only
-// the device names surround40, surround41, surround50, etc., have a defined
-// channel mapping according to Lennart:
-//
-// http://0pointer.de/blog/projects/guide-to-sound-apis.html
-//
-// This function makes a best guess at the specific > 2 channel device name
-// based on the number of channels requested. NULL is returned if no device
-// can be found to match the channel numbers. In this case, using
-// kDefaultDevice is probably the best bet.
-//
-// A five channel source is assumed to be surround50 instead of surround41
-// (which is also 5 channels).
-//
-// TODO(ajwong): The source data should have enough info to tell us if we want
-// surround41 versus surround51, etc., instead of needing us to guess based on
-// channel number. Fix API to pass that data down.
-static const char* GuessSpecificDeviceName(uint32 channels) {
- switch (channels) {
- case 8:
- return "surround71";
-
- case 7:
- return "surround70";
-
- case 6:
- return "surround51";
-
- case 5:
- return "surround50";
-
- case 4:
- return "surround40";
-
- default:
- return NULL;
- }
-}
-
-std::ostream& operator<<(std::ostream& os,
- AlsaPcmOutputStream::InternalState state) {
- switch (state) {
- case AlsaPcmOutputStream::kInError:
- os << "kInError";
- break;
- case AlsaPcmOutputStream::kCreated:
- os << "kCreated";
- break;
- case AlsaPcmOutputStream::kIsOpened:
- os << "kIsOpened";
- break;
- case AlsaPcmOutputStream::kIsPlaying:
- os << "kIsPlaying";
- break;
- case AlsaPcmOutputStream::kIsStopped:
- os << "kIsStopped";
- break;
- case AlsaPcmOutputStream::kIsClosed:
- os << "kIsClosed";
- break;
- };
- return os;
-}
-
-const char AlsaPcmOutputStream::kDefaultDevice[] = "default";
-const char AlsaPcmOutputStream::kAutoSelectDevice[] = "";
-const char AlsaPcmOutputStream::kPlugPrefix[] = "plug:";
-
-// We use 40ms as our minimum required latency. If it is needed, we may be able
-// to get it down to 20ms.
-const uint32 AlsaPcmOutputStream::kMinLatencyMicros = 40 * 1000;
-
-AlsaPcmOutputStream::AlsaPcmOutputStream(const std::string& device_name,
- const AudioParameters& params,
- AlsaWrapper* wrapper,
- AudioManagerLinux* manager)
- : requested_device_name_(device_name),
- pcm_format_(alsa_util::BitsToFormat(params.bits_per_sample())),
- channels_(params.channels()),
- channel_layout_(params.channel_layout()),
- sample_rate_(params.sample_rate()),
- bytes_per_sample_(params.bits_per_sample() / 8),
- bytes_per_frame_(channels_ * params.bits_per_sample() / 8),
- packet_size_(params.GetBytesPerBuffer()),
- micros_per_packet_(FramesToMicros(
- params.frames_per_buffer(), sample_rate_)),
- latency_micros_(std::max(AlsaPcmOutputStream::kMinLatencyMicros,
- micros_per_packet_ * 2)),
- bytes_per_output_frame_(bytes_per_frame_),
- alsa_buffer_frames_(0),
- stop_stream_(false),
- wrapper_(wrapper),
- manager_(manager),
- message_loop_(MessageLoop::current()),
- playback_handle_(NULL),
- frames_per_packet_(packet_size_ / bytes_per_frame_),
- ALLOW_THIS_IN_INITIALIZER_LIST(weak_factory_(this)),
- state_(kCreated),
- volume_(1.0f),
- source_callback_(NULL),
- audio_bus_(AudioBus::Create(params)) {
- DCHECK(manager_->GetMessageLoop()->BelongsToCurrentThread());
- DCHECK_EQ(audio_bus_->frames() * bytes_per_frame_, packet_size_);
-
- // Sanity check input values.
- if (!params.IsValid()) {
- LOG(WARNING) << "Unsupported audio parameters.";
- TransitionTo(kInError);
- }
-
- if (pcm_format_ == SND_PCM_FORMAT_UNKNOWN) {
- LOG(WARNING) << "Unsupported bits per sample: " << params.bits_per_sample();
- TransitionTo(kInError);
- }
-}
-
-AlsaPcmOutputStream::~AlsaPcmOutputStream() {
- InternalState current_state = state();
- DCHECK(current_state == kCreated ||
- current_state == kIsClosed ||
- current_state == kInError);
- DCHECK(!playback_handle_);
-}
-
-bool AlsaPcmOutputStream::Open() {
- DCHECK(IsOnAudioThread());
-
- if (state() == kInError)
- return false;
-
- if (!CanTransitionTo(kIsOpened)) {
- NOTREACHED() << "Invalid state: " << state();
- return false;
- }
-
- // We do not need to check if the transition was successful because
- // CanTransitionTo() was checked above, and it is assumed that this
- // object's public API is only called on one thread so the state cannot
- // transition out from under us.
- TransitionTo(kIsOpened);
-
- // Try to open the device.
- if (requested_device_name_ == kAutoSelectDevice) {
- playback_handle_ = AutoSelectDevice(latency_micros_);
- if (playback_handle_)
- DVLOG(1) << "Auto-selected device: " << device_name_;
- } else {
- device_name_ = requested_device_name_;
- playback_handle_ = alsa_util::OpenPlaybackDevice(
- wrapper_, device_name_.c_str(), channels_, sample_rate_,
- pcm_format_, latency_micros_);
- }
-
- // Finish initializing the stream if the device was opened successfully.
- if (playback_handle_ == NULL) {
- stop_stream_ = true;
- TransitionTo(kInError);
- return false;
- } else {
- bytes_per_output_frame_ = channel_mixer_ ?
- mixed_audio_bus_->channels() * bytes_per_sample_ : bytes_per_frame_;
- uint32 output_packet_size = frames_per_packet_ * bytes_per_output_frame_;
- buffer_.reset(new media::SeekableBuffer(0, output_packet_size));
-
- // Get alsa buffer size.
- snd_pcm_uframes_t buffer_size;
- snd_pcm_uframes_t period_size;
- int error = wrapper_->PcmGetParams(playback_handle_, &buffer_size,
- &period_size);
- if (error < 0) {
- LOG(ERROR) << "Failed to get playback buffer size from ALSA: "
- << wrapper_->StrError(error);
- // Buffer size is at least twice the packet size.
- alsa_buffer_frames_ = frames_per_packet_ * 2;
- } else {
- alsa_buffer_frames_ = buffer_size;
- }
- }
-
- return true;
-}
-
-void AlsaPcmOutputStream::Close() {
- DCHECK(IsOnAudioThread());
-
- if (state() != kIsClosed)
- TransitionTo(kIsClosed);
-
- // Shutdown the audio device.
- if (playback_handle_) {
- if (alsa_util::CloseDevice(wrapper_, playback_handle_) < 0) {
- LOG(WARNING) << "Unable to close audio device. Leaking handle.";
- }
- playback_handle_ = NULL;
-
- // Release the buffer.
- buffer_.reset();
-
- // Signal anything that might already be scheduled to stop.
- stop_stream_ = true; // Not necessary in production, but unit tests
- // use the flag to verify that the stream was closed.
- }
-
- weak_factory_.InvalidateWeakPtrs();
-
- // Signal to the manager that we're closed and can be removed.
- // Should be last call in the method as it deletes "this".
- manager_->ReleaseOutputStream(this);
-}
-
-void AlsaPcmOutputStream::Start(AudioSourceCallback* callback) {
- DCHECK(IsOnAudioThread());
-
- CHECK(callback);
-
- if (stop_stream_)
- return;
-
- set_source_callback(callback);
-
- // Only post the task if we can enter the playing state.
- if (TransitionTo(kIsPlaying) == kIsPlaying) {
- // Before starting, the buffer might have audio from a previous user of this
- // device.
- buffer_->Clear();
-
- // When starting again, drop all packets in the device and prepare it again
- // in case we are restarting from a pause state and need to flush old data.
- int error = wrapper_->PcmDrop(playback_handle_);
- if (error < 0 && error != -EAGAIN) {
- LOG(ERROR) << "Failure clearing playback device ("
- << wrapper_->PcmName(playback_handle_) << "): "
- << wrapper_->StrError(error);
- stop_stream_ = true;
- } else {
- error = wrapper_->PcmPrepare(playback_handle_);
- if (error < 0 && error != -EAGAIN) {
- LOG(ERROR) << "Failure preparing stream ("
- << wrapper_->PcmName(playback_handle_) << "): "
- << wrapper_->StrError(error);
- stop_stream_ = true;
- }
- }
-
- if (!stop_stream_)
- WriteTask();
- }
-}
-
-void AlsaPcmOutputStream::Stop() {
- DCHECK(IsOnAudioThread());
-
- // Reset the callback, so that it is not called anymore.
- set_source_callback(NULL);
-
- TransitionTo(kIsStopped);
-}
-
-void AlsaPcmOutputStream::SetVolume(double volume) {
- DCHECK(IsOnAudioThread());
-
- volume_ = static_cast<float>(volume);
-}
-
-void AlsaPcmOutputStream::GetVolume(double* volume) {
- DCHECK(IsOnAudioThread());
-
- *volume = volume_;
-}
-
-void AlsaPcmOutputStream::BufferPacket(bool* source_exhausted) {
- DCHECK(IsOnAudioThread());
-
- // If stopped, simulate a 0-length packet.
- if (stop_stream_) {
- buffer_->Clear();
- *source_exhausted = true;
- return;
- }
-
- *source_exhausted = false;
-
- // Request more data only when we run out of data in the buffer, because
- // WritePacket() consumes only the current chunk of data.
- if (!buffer_->forward_bytes()) {
- // Before making a request to source for data we need to determine the
- // delay (in bytes) for the requested data to be played.
-
- uint32 buffer_delay = buffer_->forward_bytes() * bytes_per_frame_ /
- bytes_per_output_frame_;
-
- uint32 hardware_delay = GetCurrentDelay() * bytes_per_frame_;
-
- scoped_refptr<media::DataBuffer> packet =
- new media::DataBuffer(packet_size_);
- int frames_filled = RunDataCallback(
- audio_bus_.get(), AudioBuffersState(buffer_delay, hardware_delay));
- size_t packet_size = frames_filled * bytes_per_frame_;
- DCHECK_LE(packet_size, packet_size_);
-
- // Reset |last_fill_time_| to avoid back-to-back RunDataCallback() calls.
- last_fill_time_ = base::Time::Now();
-
- // TODO(dalecurtis): Channel downmixing, upmixing, should be done in mixer;
- // volume adjust should use SSE optimized vector_fmul() prior to interleave.
- AudioBus* output_bus = audio_bus_.get();
- if (channel_mixer_) {
- output_bus = mixed_audio_bus_.get();
- channel_mixer_->Transform(audio_bus_.get(), output_bus);
- // Adjust packet size for downmix.
- packet_size = packet_size / bytes_per_frame_ * bytes_per_output_frame_;
- }
-
- // Note: If this ever changes to output raw float the data must be clipped
- // and sanitized since it may come from an untrusted source such as NaCl.
- output_bus->ToInterleaved(
- frames_filled, bytes_per_sample_, packet->GetWritableData());
-
- media::AdjustVolume(packet->GetWritableData(),
- packet_size,
- output_bus->channels(),
- bytes_per_sample_,
- volume_);
-
- if (packet_size > 0) {
- packet->SetDataSize(packet_size);
- // Add the packet to the buffer.
- buffer_->Append(packet);
- } else {
- *source_exhausted = true;
- }
- }
-}
-
-void AlsaPcmOutputStream::WritePacket() {
- DCHECK(IsOnAudioThread());
-
- // If the device is in error, just eat the bytes.
- if (stop_stream_) {
- buffer_->Clear();
- return;
- }
-
- if (state() != kIsPlaying)
- return;
-
- CHECK_EQ(buffer_->forward_bytes() % bytes_per_output_frame_, 0u);
-
- const uint8* buffer_data;
- int buffer_size;
- if (buffer_->GetCurrentChunk(&buffer_data, &buffer_size)) {
- buffer_size = buffer_size - (buffer_size % bytes_per_output_frame_);
- snd_pcm_sframes_t frames = std::min(
- static_cast<snd_pcm_sframes_t>(buffer_size / bytes_per_output_frame_),
- GetAvailableFrames());
-
- snd_pcm_sframes_t frames_written =
- wrapper_->PcmWritei(playback_handle_, buffer_data, frames);
- if (frames_written < 0) {
- // Attempt once to immediately recover from EINTR,
- // EPIPE (overrun/underrun), ESTRPIPE (stream suspended). WritePacket
- // will eventually be called again, so eventual recovery will happen if
- // multiple retries are required.
- frames_written = wrapper_->PcmRecover(playback_handle_,
- frames_written,
- kPcmRecoverIsSilent);
- if (frames_written < 0) {
- if (frames_written != -EAGAIN) {
- LOG(ERROR) << "Failed to write to pcm device: "
- << wrapper_->StrError(frames_written);
- RunErrorCallback(frames_written);
- stop_stream_ = true;
- }
- }
- } else {
- DCHECK_EQ(frames_written, frames);
-
- // Seek forward in the buffer after we've written some data to ALSA.
- buffer_->Seek(frames_written * bytes_per_output_frame_);
- }
- } else {
- // If nothing left to write and playback hasn't started yet, start it now.
- // This ensures that shorter sounds will still play.
- if (playback_handle_ &&
- (wrapper_->PcmState(playback_handle_) == SND_PCM_STATE_PREPARED) &&
- GetCurrentDelay() > 0) {
- wrapper_->PcmStart(playback_handle_);
- }
- }
-}
-
-void AlsaPcmOutputStream::WriteTask() {
- DCHECK(IsOnAudioThread());
-
- if (stop_stream_)
- return;
-
- if (state() == kIsStopped)
- return;
-
- bool source_exhausted;
- BufferPacket(&source_exhausted);
- WritePacket();
-
- ScheduleNextWrite(source_exhausted);
-}
-
-void AlsaPcmOutputStream::ScheduleNextWrite(bool source_exhausted) {
- DCHECK(IsOnAudioThread());
-
- if (stop_stream_)
- return;
-
- const uint32 kTargetFramesAvailable = alsa_buffer_frames_ / 2;
- uint32 available_frames = GetAvailableFrames();
- uint32 frames_in_buffer = buffer_->forward_bytes() / bytes_per_output_frame_;
-
- // Next write is initially scheduled for the moment when half of a packet
- // has been played out.
- uint32 next_fill_time_ms =
- FramesToMillis(frames_per_packet_ / 2, sample_rate_);
-
- if (frames_in_buffer && available_frames) {
- // There is data in the current buffer; consume it immediately once we
- // have enough space in the soundcard.
- if (frames_in_buffer <= available_frames)
- next_fill_time_ms = 0;
- } else {
- // Otherwise schedule the next write for the moment when the available
- // buffer of the soundcard hits |kTargetFramesAvailable|.
- if (available_frames < kTargetFramesAvailable) {
- uint32 frames_until_empty_enough =
- kTargetFramesAvailable - available_frames;
- next_fill_time_ms =
- FramesToMillis(frames_until_empty_enough, sample_rate_);
-
- // Adjust for the kernel timeslice and any additional slowdown.
- // TODO(xians): Remove this adjustment if it is not required by
- // low performance machines any more.
- if (next_fill_time_ms > kSleepErrorMilliseconds)
- next_fill_time_ms -= kSleepErrorMilliseconds;
- else
- next_fill_time_ms = 0;
- } else {
- // The sound card has |kTargetFramesAvailable| or more frames available.
- // Invoke the next write immediately to avoid underrun.
- next_fill_time_ms = 0;
- }
-
- // Avoid back-to-back writing.
- base::TimeDelta delay = base::Time::Now() - last_fill_time_;
- if (delay.InMilliseconds() < kMinIntervalBetweenOnMoreDataCallsInMs &&
- next_fill_time_ms < kMinIntervalBetweenOnMoreDataCallsInMs)
- next_fill_time_ms = kMinIntervalBetweenOnMoreDataCallsInMs;
- }
-
- // Avoid busy looping if the data source is exhausted.
- if (source_exhausted)
- next_fill_time_ms = std::max(next_fill_time_ms, kNoDataSleepMilliseconds);
-
- // Only schedule more reads/writes if we are still in the playing state.
- if (state() == kIsPlaying) {
- message_loop_->PostDelayedTask(
- FROM_HERE,
- base::Bind(&AlsaPcmOutputStream::WriteTask,
- weak_factory_.GetWeakPtr()),
- base::TimeDelta::FromMilliseconds(next_fill_time_ms));
- }
-}
-
-uint32 AlsaPcmOutputStream::FramesToMicros(uint32 frames,
- uint32 sample_rate) {
- return frames * base::Time::kMicrosecondsPerSecond / sample_rate;
-}
-
-uint32 AlsaPcmOutputStream::FramesToMillis(uint32 frames,
- uint32 sample_rate) {
- return frames * base::Time::kMillisecondsPerSecond / sample_rate;
-}
-
-std::string AlsaPcmOutputStream::FindDeviceForChannels(uint32 channels) {
- // Constants specified by the ALSA API for device hints.
- static const int kGetAllDevices = -1;
- static const char kPcmInterfaceName[] = "pcm";
- static const char kIoHintName[] = "IOID";
- static const char kNameHintName[] = "NAME";
-
- const char* wanted_device = GuessSpecificDeviceName(channels);
- if (!wanted_device)
- return "";
-
- std::string guessed_device;
- void** hints = NULL;
- int error = wrapper_->DeviceNameHint(kGetAllDevices,
- kPcmInterfaceName,
- &hints);
- if (error == 0) {
- // NOTE: Do not return early from inside this if statement. The
- // hints above need to be freed.
- for (void** hint_iter = hints; *hint_iter != NULL; hint_iter++) {
- // Only examine devices that are output capable. Valid values are
- // "Input", "Output", and NULL which means both input and output.
- scoped_ptr_malloc<char> io(
- wrapper_->DeviceNameGetHint(*hint_iter, kIoHintName));
- if (io != NULL && strcmp(io.get(), "Input") == 0)
- continue;
-
- // Attempt to select the closest device for number of channels.
- scoped_ptr_malloc<char> name(
- wrapper_->DeviceNameGetHint(*hint_iter, kNameHintName));
- if (strncmp(wanted_device, name.get(), strlen(wanted_device)) == 0) {
- guessed_device = name.get();
- break;
- }
- }
-
- // Destroy the hint now that we're done with it.
- wrapper_->DeviceNameFreeHint(hints);
- hints = NULL;
- } else {
- LOG(ERROR) << "Unable to get hints for devices: "
- << wrapper_->StrError(error);
- }
-
- return guessed_device;
-}
-
-snd_pcm_sframes_t AlsaPcmOutputStream::GetCurrentDelay() {
- snd_pcm_sframes_t delay = -1;
- // Don't query ALSA's delay if we have underrun since it'll be jammed at some
- // non-zero value and potentially even negative!
- //
- // Also, if we're in the prepared state, don't query because that seems to
- // cause an I/O error when we do query the delay.
- snd_pcm_state_t pcm_state = wrapper_->PcmState(playback_handle_);
- if (pcm_state != SND_PCM_STATE_XRUN &&
- pcm_state != SND_PCM_STATE_PREPARED) {
- int error = wrapper_->PcmDelay(playback_handle_, &delay);
- if (error < 0) {
- // Assume a delay of zero and attempt to recover the device.
- delay = -1;
- error = wrapper_->PcmRecover(playback_handle_,
- error,
- kPcmRecoverIsSilent);
- if (error < 0) {
- LOG(ERROR) << "Failed querying delay: " << wrapper_->StrError(error);
- }
- }
- }
-
- // snd_pcm_delay() sometimes returns crazy values. In this case, return the
- // delay of the data we know is currently in ALSA's buffer.
- if (delay < 0 || static_cast<snd_pcm_uframes_t>(delay) > alsa_buffer_frames_)
- delay = alsa_buffer_frames_ - GetAvailableFrames();
-
- return delay;
-}
-
-snd_pcm_sframes_t AlsaPcmOutputStream::GetAvailableFrames() {
- DCHECK(IsOnAudioThread());
-
- if (stop_stream_)
- return 0;
-
- // Find the number of frames queued in the sound device.
- snd_pcm_sframes_t available_frames =
- wrapper_->PcmAvailUpdate(playback_handle_);
- if (available_frames < 0) {
- available_frames = wrapper_->PcmRecover(playback_handle_,
- available_frames,
- kPcmRecoverIsSilent);
- }
- if (available_frames < 0) {
- LOG(ERROR) << "Failed querying available frames. Assuming 0: "
- << wrapper_->StrError(available_frames);
- return 0;
- }
- if (static_cast<uint32>(available_frames) > alsa_buffer_frames_) {
- LOG(ERROR) << "ALSA returned " << available_frames << " of "
- << alsa_buffer_frames_ << " frames available.";
- return alsa_buffer_frames_;
- }
-
- return available_frames;
-}
-
-snd_pcm_t* AlsaPcmOutputStream::AutoSelectDevice(unsigned int latency) {
- // For auto-selection:
- // 1) Attempt to open a device that best matches the number of channels
- // requested.
- // 2) If that fails, attempt the "plug:" version of it in case ALSA can
- // remap or do some software conversion to make it work.
- // 3) Fallback to kDefaultDevice.
- // 4) If that fails too, try the "plug:" version of kDefaultDevice.
- // 5) Give up.
- snd_pcm_t* handle = NULL;
- device_name_ = FindDeviceForChannels(channels_);
-
- // Step 1.
- if (!device_name_.empty()) {
- if ((handle = alsa_util::OpenPlaybackDevice(wrapper_, device_name_.c_str(),
- channels_, sample_rate_,
- pcm_format_,
- latency)) != NULL) {
- return handle;
- }
-
- // Step 2.
- device_name_ = kPlugPrefix + device_name_;
- if ((handle = alsa_util::OpenPlaybackDevice(wrapper_, device_name_.c_str(),
- channels_, sample_rate_,
- pcm_format_,
- latency)) != NULL) {
- return handle;
- }
- }
-
- // For the kDefaultDevice device, we can only reliably depend on 2-channel
- // output to have the correct ordering according to Lennart. For the channel
- // formats that we know how to downmix from (3 channels to 8 channels), set up
- // downmixing.
- uint32 default_channels = channels_;
- if (default_channels > 2) {
- channel_mixer_.reset(new ChannelMixer(
- channel_layout_, CHANNEL_LAYOUT_STEREO));
- default_channels = 2;
- mixed_audio_bus_ = AudioBus::Create(
- default_channels, audio_bus_->frames());
- }
-
- // Step 3.
- device_name_ = kDefaultDevice;
- if ((handle = alsa_util::OpenPlaybackDevice(
- wrapper_, device_name_.c_str(), default_channels, sample_rate_,
- pcm_format_, latency)) != NULL) {
- return handle;
- }
-
- // Step 4.
- device_name_ = kPlugPrefix + device_name_;
- if ((handle = alsa_util::OpenPlaybackDevice(
- wrapper_, device_name_.c_str(), default_channels, sample_rate_,
- pcm_format_, latency)) != NULL) {
- return handle;
- }
-
- // Unable to open any device.
- device_name_.clear();
- return NULL;
-}
-
-bool AlsaPcmOutputStream::CanTransitionTo(InternalState to) {
- switch (state_) {
- case kCreated:
- return to == kIsOpened || to == kIsClosed || to == kInError;
-
- case kIsOpened:
- return to == kIsPlaying || to == kIsStopped ||
- to == kIsClosed || to == kInError;
-
- case kIsPlaying:
- return to == kIsPlaying || to == kIsStopped ||
- to == kIsClosed || to == kInError;
-
- case kIsStopped:
- return to == kIsPlaying || to == kIsStopped ||
- to == kIsClosed || to == kInError;
-
- case kInError:
- return to == kIsClosed || to == kInError;
-
- case kIsClosed:
- default:
- return false;
- }
-}
-
-AlsaPcmOutputStream::InternalState
-AlsaPcmOutputStream::TransitionTo(InternalState to) {
- DCHECK(IsOnAudioThread());
-
- if (!CanTransitionTo(to)) {
- NOTREACHED() << "Cannot transition from: " << state_ << " to: " << to;
- state_ = kInError;
- } else {
- state_ = to;
- }
- return state_;
-}
-
-AlsaPcmOutputStream::InternalState AlsaPcmOutputStream::state() {
- return state_;
-}
-
-bool AlsaPcmOutputStream::IsOnAudioThread() const {
- return message_loop_ && message_loop_ == MessageLoop::current();
-}
-
-int AlsaPcmOutputStream::RunDataCallback(AudioBus* audio_bus,
- AudioBuffersState buffers_state) {
- TRACE_EVENT0("audio", "AlsaPcmOutputStream::RunDataCallback");
-
- if (source_callback_)
- return source_callback_->OnMoreData(audio_bus, buffers_state);
-
- return 0;
-}
-
-void AlsaPcmOutputStream::RunErrorCallback(int code) {
- if (source_callback_)
- source_callback_->OnError(this, code);
-}
-
-// Changes the AudioSourceCallback to proxy calls to. Pass in NULL to
-// release ownership of the currently registered callback.
-void AlsaPcmOutputStream::set_source_callback(AudioSourceCallback* callback) {
- DCHECK(IsOnAudioThread());
- source_callback_ = callback;
-}
-
-} // namespace media
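The removed output stream guards every state change through CanTransitionTo()/TransitionTo(), collapsing invalid transitions into kInError instead of crashing in release builds. The following condensed sketch shows just that guard pattern under simplified assumptions: free functions and an enum class stand in for the class members, and the grouped cases reflect the fact that kIsOpened, kIsPlaying, and kIsStopped share the same allowed targets above.

// state_machine_sketch.cc -- standalone illustration of the guarded
// transition idea, not the full AlsaPcmOutputStream implementation.
#include <cassert>
#include <cstdio>

enum class State { kInError, kCreated, kIsOpened, kIsPlaying, kIsStopped, kIsClosed };

bool CanTransitionTo(State from, State to) {
  switch (from) {
    case State::kCreated:
      return to == State::kIsOpened || to == State::kIsClosed ||
             to == State::kInError;
    case State::kIsOpened:
    case State::kIsPlaying:
    case State::kIsStopped:
      return to == State::kIsPlaying || to == State::kIsStopped ||
             to == State::kIsClosed || to == State::kInError;
    case State::kInError:
      return to == State::kIsClosed || to == State::kInError;
    case State::kIsClosed:
    default:
      return false;  // Closed is terminal.
  }
}

State TransitionTo(State current, State to) {
  // Invalid transitions fall into the error state rather than asserting,
  // mirroring how the guarded TransitionTo() above degrades in release builds.
  return CanTransitionTo(current, to) ? to : State::kInError;
}

int main() {
  State s = State::kCreated;
  s = TransitionTo(s, State::kIsOpened);   // Allowed.
  s = TransitionTo(s, State::kIsPlaying);  // Allowed.
  s = TransitionTo(s, State::kCreated);    // Invalid -> kInError.
  assert(s == State::kInError);
  std::printf("ended in error state: %d\n", static_cast<int>(s == State::kInError));
  return 0;
}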
diff --git a/src/media/audio/linux/alsa_output.h b/src/media/audio/linux/alsa_output.h
deleted file mode 100644
index ffb29f4..0000000
--- a/src/media/audio/linux/alsa_output.h
+++ /dev/null
@@ -1,230 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-//
-// Creates an output stream based on the ALSA PCM interface.
-//
-// On device write failure, the stream will move itself to an invalid state.
-// No more data will be pulled from the data source, or written to the device.
-// All calls to public API functions will either no-op themselves, or return an
-// error if possible. Specifically, if the stream is in an error state, Open()
-// will return false, and Start() will call OnError() immediately on the
-// provided callback.
-//
-// If the stream is successfully opened, Close() must be called. After Close
-// has been called, the object should be regarded as deleted and not touched.
-//
-// AlsaPcmOutputStream is a single threaded class that should only be used from
-// the audio thread. When modifying the code in this class, please read the
-// threading assumptions at the top of the implementation.
-
-#ifndef MEDIA_AUDIO_LINUX_ALSA_OUTPUT_H_
-#define MEDIA_AUDIO_LINUX_ALSA_OUTPUT_H_
-
-#include <alsa/asoundlib.h>
-
-#include <string>
-
-#include "base/compiler_specific.h"
-#include "base/gtest_prod_util.h"
-#include "base/memory/scoped_ptr.h"
-#include "base/memory/weak_ptr.h"
-#include "base/time.h"
-#include "media/audio/audio_io.h"
-#include "media/audio/audio_parameters.h"
-
-class MessageLoop;
-
-namespace media {
-
-class AlsaWrapper;
-class AudioManagerLinux;
-class ChannelMixer;
-class SeekableBuffer;
-
-class MEDIA_EXPORT AlsaPcmOutputStream : public AudioOutputStream {
- public:
- // String for the generic "default" ALSA device that has the highest
- // compatibility and chance of working.
- static const char kDefaultDevice[];
-
- // Pass this to the AlsaPcmOutputStream if you want to attempt auto-selection
- // of the audio device.
- static const char kAutoSelectDevice[];
-
- // Prefix for device names to enable ALSA library resampling.
- static const char kPlugPrefix[];
-
- // The minimum latency that is accepted by the device.
- static const uint32 kMinLatencyMicros;
-
- // Create a PCM Output stream for the ALSA device identified by
- // |device_name|. The AlsaPcmOutputStream uses |wrapper| to communicate with
- // the alsa libraries, allowing for dependency injection during testing. All
- // requesting of data, and writing to the alsa device will be done on
- // |message_loop|.
- //
- // If unsure of what to use for |device_name|, use |kAutoSelectDevice|.
- AlsaPcmOutputStream(const std::string& device_name,
- const AudioParameters& params,
- AlsaWrapper* wrapper,
- AudioManagerLinux* manager);
-
- virtual ~AlsaPcmOutputStream();
-
- // Implementation of AudioOutputStream.
- virtual bool Open() OVERRIDE;
- virtual void Close() OVERRIDE;
- virtual void Start(AudioSourceCallback* callback) OVERRIDE;
- virtual void Stop() OVERRIDE;
- virtual void SetVolume(double volume) OVERRIDE;
- virtual void GetVolume(double* volume) OVERRIDE;
-
- private:
- friend class AlsaPcmOutputStreamTest;
- FRIEND_TEST_ALL_PREFIXES(AlsaPcmOutputStreamTest,
- AutoSelectDevice_DeviceSelect);
- FRIEND_TEST_ALL_PREFIXES(AlsaPcmOutputStreamTest,
- AutoSelectDevice_FallbackDevices);
- FRIEND_TEST_ALL_PREFIXES(AlsaPcmOutputStreamTest, AutoSelectDevice_HintFail);
- FRIEND_TEST_ALL_PREFIXES(AlsaPcmOutputStreamTest, BufferPacket);
- FRIEND_TEST_ALL_PREFIXES(AlsaPcmOutputStreamTest, BufferPacket_Negative);
- FRIEND_TEST_ALL_PREFIXES(AlsaPcmOutputStreamTest, BufferPacket_StopStream);
- FRIEND_TEST_ALL_PREFIXES(AlsaPcmOutputStreamTest, BufferPacket_Underrun);
- FRIEND_TEST_ALL_PREFIXES(AlsaPcmOutputStreamTest, BufferPacket_FullBuffer);
- FRIEND_TEST_ALL_PREFIXES(AlsaPcmOutputStreamTest, ConstructedState);
- FRIEND_TEST_ALL_PREFIXES(AlsaPcmOutputStreamTest, LatencyFloor);
- FRIEND_TEST_ALL_PREFIXES(AlsaPcmOutputStreamTest, OpenClose);
- FRIEND_TEST_ALL_PREFIXES(AlsaPcmOutputStreamTest, PcmOpenFailed);
- FRIEND_TEST_ALL_PREFIXES(AlsaPcmOutputStreamTest, PcmSetParamsFailed);
- FRIEND_TEST_ALL_PREFIXES(AlsaPcmOutputStreamTest, ScheduleNextWrite);
- FRIEND_TEST_ALL_PREFIXES(AlsaPcmOutputStreamTest,
- ScheduleNextWrite_StopStream);
- FRIEND_TEST_ALL_PREFIXES(AlsaPcmOutputStreamTest, StartStop);
- FRIEND_TEST_ALL_PREFIXES(AlsaPcmOutputStreamTest, WritePacket_FinishedPacket);
- FRIEND_TEST_ALL_PREFIXES(AlsaPcmOutputStreamTest, WritePacket_NormalPacket);
- FRIEND_TEST_ALL_PREFIXES(AlsaPcmOutputStreamTest, WritePacket_StopStream);
- FRIEND_TEST_ALL_PREFIXES(AlsaPcmOutputStreamTest, WritePacket_WriteFails);
-
- // Flags indicating the state of the stream.
- enum InternalState {
- kInError = 0,
- kCreated,
- kIsOpened,
- kIsPlaying,
- kIsStopped,
- kIsClosed
- };
- friend std::ostream& operator<<(std::ostream& os, InternalState);
-
- // Functions to get another packet from the data source and write it into the
- // ALSA device.
- void BufferPacket(bool* source_exhausted);
- void WritePacket();
- void WriteTask();
- void ScheduleNextWrite(bool source_exhausted);
-
- // Utility functions for talking with the ALSA API.
- static uint32 FramesToMicros(uint32 frames, uint32 sample_rate);
- static uint32 FramesToMillis(uint32 frames, uint32 sample_rate);
- std::string FindDeviceForChannels(uint32 channels);
- snd_pcm_sframes_t GetAvailableFrames();
- snd_pcm_sframes_t GetCurrentDelay();
-
- // Attempts to find the best matching linux audio device for the given number
- // of channels. This function will set |device_name_| and |channel_mixer_|.
- snd_pcm_t* AutoSelectDevice(uint32 latency);
-
- // Functions to safeguard state transitions. All changes to the object state
- // should go through these functions.
- bool CanTransitionTo(InternalState to);
- InternalState TransitionTo(InternalState to);
- InternalState state();
-
- // Returns true when we're on the audio thread or if the audio thread's
- // message loop is NULL (which will happen during shutdown).
- bool IsOnAudioThread() const;
-
- // API for Proxying calls to the AudioSourceCallback provided during
- // Start().
- //
- // TODO(ajwong): This is necessary because the ownership semantics for the
- // |source_callback_| object are incorrect in AudioRenderHost. The callback
- // is passed into the output stream, but ownership is not transferred, which
- // requires synchronization on access of |source_callback_| to avoid
- // using a deleted callback.
- int RunDataCallback(AudioBus* audio_bus, AudioBuffersState buffers_state);
- void RunErrorCallback(int code);
-
- // Changes the AudioSourceCallback to proxy calls to. Pass in NULL to
- // release ownership of the currently registered callback.
- void set_source_callback(AudioSourceCallback* callback);
-
- // Configuration constants from the constructor. Referenceable by all threads
- // since they are constants.
- const std::string requested_device_name_;
- const snd_pcm_format_t pcm_format_;
- const uint32 channels_;
- const ChannelLayout channel_layout_;
- const uint32 sample_rate_;
- const uint32 bytes_per_sample_;
- const uint32 bytes_per_frame_;
-
- // Device configuration data. Populated after OpenTask() completes.
- std::string device_name_;
- uint32 packet_size_;
- uint32 micros_per_packet_;
- uint32 latency_micros_;
- uint32 bytes_per_output_frame_;
- uint32 alsa_buffer_frames_;
-
- // Flag indicating the code should stop reading from the data source or
- // writing to the ALSA device. This is set because the device has entered
- // an unrecoverable error state, or the ClosedTask() has executed.
- bool stop_stream_;
-
- // Wrapper class to invoke all the ALSA functions.
- AlsaWrapper* wrapper_;
-
- // Audio manager that created us. Used to report that we've been closed.
- AudioManagerLinux* manager_;
-
- // Message loop to use for polling. The object is owned by the AudioManager.
- // We hold a reference to the audio thread message loop since
- // AudioManagerBase::ShutDown() can invalidate the message loop pointer
- // before the stream gets deleted.
- MessageLoop* message_loop_;
-
- // Handle to the actual PCM playback device.
- snd_pcm_t* playback_handle_;
-
- scoped_ptr<media::SeekableBuffer> buffer_;
- uint32 frames_per_packet_;
-
- // Allows us to run tasks on the AlsaPcmOutputStream instance which are
- // bound by its lifetime.
- base::WeakPtrFactory<AlsaPcmOutputStream> weak_factory_;
-
- InternalState state_;
- float volume_; // Volume level from 0.0 to 1.0.
-
- AudioSourceCallback* source_callback_;
-
- base::Time last_fill_time_; // Time for the last OnMoreData() callback.
-
- // Container for retrieving data from AudioSourceCallback::OnMoreData().
- scoped_ptr<AudioBus> audio_bus_;
-
- // Channel mixer and temporary bus for the final mixed channel data.
- scoped_ptr<ChannelMixer> channel_mixer_;
- scoped_ptr<AudioBus> mixed_audio_bus_;
-
- DISALLOW_COPY_AND_ASSIGN(AlsaPcmOutputStream);
-};
-
-MEDIA_EXPORT std::ostream& operator<<(std::ostream& os,
- AlsaPcmOutputStream::InternalState);
-
-} // namespace media
-
-#endif // MEDIA_AUDIO_LINUX_ALSA_OUTPUT_H_
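AutoSelectDevice() in the deleted alsa_output.cc walks a fixed fallback order: the surround device matching the channel count, its "plug:" variant, then "default" and "plug:default". The sketch below reproduces only that candidate ordering, with no real ALSA calls; the file name, helper names, and printout are illustrative rather than part of the removed code.

// device_fallback_sketch.cc -- standalone illustration of the device
// fallback order; nothing here touches libasound.
#include <cstdio>
#include <string>
#include <vector>

// Mirrors GuessSpecificDeviceName(): only the surroundNN names have a defined
// channel mapping for more than two channels.
const char* GuessSpecificDeviceName(unsigned channels) {
  switch (channels) {
    case 8: return "surround71";
    case 7: return "surround70";
    case 6: return "surround51";
    case 5: return "surround50";
    case 4: return "surround40";
    default: return nullptr;
  }
}

std::vector<std::string> CandidateDevices(unsigned channels) {
  const std::string kPlugPrefix = "plug:";
  const std::string kDefaultDevice = "default";
  std::vector<std::string> candidates;
  if (const char* specific = GuessSpecificDeviceName(channels)) {
    candidates.push_back(specific);                // 1) Exact surround device.
    candidates.push_back(kPlugPrefix + specific);  // 2) Let ALSA convert.
  }
  candidates.push_back(kDefaultDevice);                // 3) Stereo fallback.
  candidates.push_back(kPlugPrefix + kDefaultDevice);  // 4) Converted stereo.
  return candidates;
}

int main() {
  for (const std::string& name : CandidateDevices(6))
    std::printf("would try: %s\n", name.c_str());
  return 0;
}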
diff --git a/src/media/audio/linux/alsa_output_unittest.cc b/src/media/audio/linux/alsa_output_unittest.cc
deleted file mode 100644
index 1db97af..0000000
--- a/src/media/audio/linux/alsa_output_unittest.cc
+++ /dev/null
@@ -1,869 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/message_loop.h"
-#include "base/stringprintf.h"
-#include "media/audio/linux/alsa_output.h"
-#include "media/audio/linux/alsa_wrapper.h"
-#include "media/audio/linux/audio_manager_linux.h"
-#include "media/base/data_buffer.h"
-#include "media/base/seekable_buffer.h"
-#include "testing/gmock/include/gmock/gmock.h"
-#include "testing/gtest/include/gtest/gtest.h"
-
-using testing::_;
-using testing::AllOf;
-using testing::AtLeast;
-using testing::DoAll;
-using testing::Field;
-using testing::InSequence;
-using testing::Invoke;
-using testing::InvokeWithoutArgs;
-using testing::Mock;
-using testing::MockFunction;
-using testing::Return;
-using testing::SetArgumentPointee;
-using testing::StrictMock;
-using testing::StrEq;
-using testing::Unused;
-
-namespace media {
-
-class MockAlsaWrapper : public AlsaWrapper {
- public:
- MOCK_METHOD3(DeviceNameHint, int(int card,
- const char* iface,
- void*** hints));
- MOCK_METHOD2(DeviceNameGetHint, char*(const void* hint, const char* id));
- MOCK_METHOD1(DeviceNameFreeHint, int(void** hints));
-
- MOCK_METHOD4(PcmOpen, int(snd_pcm_t** handle, const char* name,
- snd_pcm_stream_t stream, int mode));
- MOCK_METHOD1(PcmClose, int(snd_pcm_t* handle));
- MOCK_METHOD1(PcmPrepare, int(snd_pcm_t* handle));
- MOCK_METHOD1(PcmDrop, int(snd_pcm_t* handle));
- MOCK_METHOD2(PcmDelay, int(snd_pcm_t* handle, snd_pcm_sframes_t* delay));
- MOCK_METHOD3(PcmWritei, snd_pcm_sframes_t(snd_pcm_t* handle,
- const void* buffer,
- snd_pcm_uframes_t size));
- MOCK_METHOD3(PcmReadi, snd_pcm_sframes_t(snd_pcm_t* handle,
- void* buffer,
- snd_pcm_uframes_t size));
- MOCK_METHOD3(PcmRecover, int(snd_pcm_t* handle, int err, int silent));
- MOCK_METHOD7(PcmSetParams, int(snd_pcm_t* handle, snd_pcm_format_t format,
- snd_pcm_access_t access, unsigned int channels,
- unsigned int rate, int soft_resample,
- unsigned int latency));
- MOCK_METHOD3(PcmGetParams, int(snd_pcm_t* handle,
- snd_pcm_uframes_t* buffer_size,
- snd_pcm_uframes_t* period_size));
- MOCK_METHOD1(PcmName, const char*(snd_pcm_t* handle));
- MOCK_METHOD1(PcmAvailUpdate, snd_pcm_sframes_t(snd_pcm_t* handle));
- MOCK_METHOD1(PcmState, snd_pcm_state_t(snd_pcm_t* handle));
- MOCK_METHOD1(PcmStart, int(snd_pcm_t* handle));
-
- MOCK_METHOD1(StrError, const char*(int errnum));
-};
-
-class MockAudioSourceCallback : public AudioOutputStream::AudioSourceCallback {
- public:
- MOCK_METHOD2(OnMoreData, int(AudioBus* audio_bus,
- AudioBuffersState buffers_state));
- MOCK_METHOD3(OnMoreIOData, int(AudioBus* source,
- AudioBus* dest,
- AudioBuffersState buffers_state));
- MOCK_METHOD2(OnError, void(AudioOutputStream* stream, int code));
-};
-
-class MockAudioManagerLinux : public AudioManagerLinux {
- public:
- MOCK_METHOD0(Init, void());
- MOCK_METHOD0(HasAudioOutputDevices, bool());
- MOCK_METHOD0(HasAudioInputDevices, bool());
- MOCK_METHOD1(MakeLinearOutputStream, AudioOutputStream*(
- const AudioParameters& params));
- MOCK_METHOD1(MakeLowLatencyOutputStream, AudioOutputStream*(
- const AudioParameters& params));
- MOCK_METHOD2(MakeLowLatencyInputStream, AudioInputStream*(
- const AudioParameters& params, const std::string& device_id));
-
- // We need to override this function in order to skip checking the number
- // of active output streams. This is because the number of active streams
- // is managed inside MakeAudioOutputStream, and we don't use
- // MakeAudioOutputStream to create the stream in the tests.
- virtual void ReleaseOutputStream(AudioOutputStream* stream) OVERRIDE {
- DCHECK(stream);
- delete stream;
- }
-
- // We don't mock this method since all tests will do the same thing
- // and use the current message loop.
- virtual scoped_refptr<base::MessageLoopProxy> GetMessageLoop() OVERRIDE {
- return MessageLoop::current()->message_loop_proxy();
- }
-};
-
-class AlsaPcmOutputStreamTest : public testing::Test {
- protected:
- AlsaPcmOutputStreamTest() {
- mock_manager_.reset(new StrictMock<MockAudioManagerLinux>());
- }
-
- virtual ~AlsaPcmOutputStreamTest() {
- }
-
- AlsaPcmOutputStream* CreateStream(ChannelLayout layout) {
- return CreateStream(layout, kTestFramesPerPacket);
- }
-
- AlsaPcmOutputStream* CreateStream(ChannelLayout layout,
- int32 samples_per_packet) {
- AudioParameters params(kTestFormat, layout, kTestSampleRate,
- kTestBitsPerSample, samples_per_packet);
- return new AlsaPcmOutputStream(kTestDeviceName,
- params,
- &mock_alsa_wrapper_,
- mock_manager_.get());
- }
-
- // Helper function to malloc the string returned by DeviceNameHint for NAME.
- static char* EchoHint(const void* name, Unused) {
- return strdup(static_cast<const char*>(name));
- }
-
- // Helper function to malloc the string returned by DeviceNameHint for IOID.
- static char* OutputHint(Unused, Unused) {
- return strdup("Output");
- }
-
- // Helper function to initialize |test_stream->buffer_|. Must be called
- // in all tests that use buffer_ without opening the stream.
- void InitBuffer(AlsaPcmOutputStream* test_stream) {
- DCHECK(test_stream);
- packet_ = new media::DataBuffer(kTestPacketSize);
- packet_->SetDataSize(kTestPacketSize);
- test_stream->buffer_.reset(new media::SeekableBuffer(0, kTestPacketSize));
- test_stream->buffer_->Append(packet_.get());
- }
-
- static const ChannelLayout kTestChannelLayout;
- static const int kTestSampleRate;
- static const int kTestBitsPerSample;
- static const int kTestBytesPerFrame;
- static const AudioParameters::Format kTestFormat;
- static const char kTestDeviceName[];
- static const char kDummyMessage[];
- static const uint32 kTestFramesPerPacket;
- static const int kTestPacketSize;
- static const int kTestFailedErrno;
- static snd_pcm_t* const kFakeHandle;
-
- // Used to simulate DeviceNameHint.
- static char kSurround40[];
- static char kSurround41[];
- static char kSurround50[];
- static char kSurround51[];
- static char kSurround70[];
- static char kSurround71[];
- static void* kFakeHints[];
-
- StrictMock<MockAlsaWrapper> mock_alsa_wrapper_;
- scoped_ptr<StrictMock<MockAudioManagerLinux> > mock_manager_;
- MessageLoop message_loop_;
- scoped_refptr<media::DataBuffer> packet_;
-
- private:
- DISALLOW_COPY_AND_ASSIGN(AlsaPcmOutputStreamTest);
-};
-
-const ChannelLayout AlsaPcmOutputStreamTest::kTestChannelLayout =
- CHANNEL_LAYOUT_STEREO;
-const int AlsaPcmOutputStreamTest::kTestSampleRate =
- AudioParameters::kAudioCDSampleRate;
-const int AlsaPcmOutputStreamTest::kTestBitsPerSample = 8;
-const int AlsaPcmOutputStreamTest::kTestBytesPerFrame =
- AlsaPcmOutputStreamTest::kTestBitsPerSample / 8 *
- ChannelLayoutToChannelCount(AlsaPcmOutputStreamTest::kTestChannelLayout);
-const AudioParameters::Format AlsaPcmOutputStreamTest::kTestFormat =
- AudioParameters::AUDIO_PCM_LINEAR;
-const char AlsaPcmOutputStreamTest::kTestDeviceName[] = "TestDevice";
-const char AlsaPcmOutputStreamTest::kDummyMessage[] = "dummy";
-const uint32 AlsaPcmOutputStreamTest::kTestFramesPerPacket = 1000;
-const int AlsaPcmOutputStreamTest::kTestPacketSize =
- AlsaPcmOutputStreamTest::kTestFramesPerPacket *
- AlsaPcmOutputStreamTest::kTestBytesPerFrame;
-const int AlsaPcmOutputStreamTest::kTestFailedErrno = -EACCES;
-snd_pcm_t* const AlsaPcmOutputStreamTest::kFakeHandle =
- reinterpret_cast<snd_pcm_t*>(1);
-
-char AlsaPcmOutputStreamTest::kSurround40[] = "surround40:CARD=foo,DEV=0";
-char AlsaPcmOutputStreamTest::kSurround41[] = "surround41:CARD=foo,DEV=0";
-char AlsaPcmOutputStreamTest::kSurround50[] = "surround50:CARD=foo,DEV=0";
-char AlsaPcmOutputStreamTest::kSurround51[] = "surround51:CARD=foo,DEV=0";
-char AlsaPcmOutputStreamTest::kSurround70[] = "surround70:CARD=foo,DEV=0";
-char AlsaPcmOutputStreamTest::kSurround71[] = "surround71:CARD=foo,DEV=0";
-void* AlsaPcmOutputStreamTest::kFakeHints[] = {
- kSurround40, kSurround41, kSurround50, kSurround51,
- kSurround70, kSurround71, NULL };
-
-// Custom action to clear a memory buffer.
-ACTION(ClearBuffer) {
- arg0->Zero();
-}
-
-TEST_F(AlsaPcmOutputStreamTest, ConstructedState) {
- AlsaPcmOutputStream* test_stream = CreateStream(kTestChannelLayout);
- EXPECT_EQ(AlsaPcmOutputStream::kCreated, test_stream->state());
- test_stream->Close();
-
- // Should support mono.
- test_stream = CreateStream(CHANNEL_LAYOUT_MONO);
- EXPECT_EQ(AlsaPcmOutputStream::kCreated, test_stream->state());
- test_stream->Close();
-
- // Should support multi-channel.
- test_stream = CreateStream(CHANNEL_LAYOUT_SURROUND);
- EXPECT_EQ(AlsaPcmOutputStream::kCreated, test_stream->state());
- test_stream->Close();
-
- // Bad bits per sample.
- AudioParameters bad_bps_params(kTestFormat, kTestChannelLayout,
- kTestSampleRate, kTestBitsPerSample - 1,
- kTestFramesPerPacket);
- test_stream = new AlsaPcmOutputStream(kTestDeviceName,
- bad_bps_params,
- &mock_alsa_wrapper_,
- mock_manager_.get());
- EXPECT_EQ(AlsaPcmOutputStream::kInError, test_stream->state());
- test_stream->Close();
-
- // Bad format.
- AudioParameters bad_format_params(
- AudioParameters::AUDIO_LAST_FORMAT, kTestChannelLayout, kTestSampleRate,
- kTestBitsPerSample, kTestFramesPerPacket);
- test_stream = new AlsaPcmOutputStream(kTestDeviceName,
- bad_format_params,
- &mock_alsa_wrapper_,
- mock_manager_.get());
- EXPECT_EQ(AlsaPcmOutputStream::kInError, test_stream->state());
- test_stream->Close();
-}
-
-TEST_F(AlsaPcmOutputStreamTest, LatencyFloor) {
- const double kMicrosPerFrame =
- static_cast<double>(1000000) / kTestSampleRate;
- const double kPacketFramesInMinLatency =
- AlsaPcmOutputStream::kMinLatencyMicros / kMicrosPerFrame / 2.0;
-
- // Test that packets which would cause a latency of less than
- // AlsaPcmOutputStream::kMinLatencyMicros will get clipped to
- // AlsaPcmOutputStream::kMinLatencyMicros.
- EXPECT_CALL(mock_alsa_wrapper_, PcmOpen(_, _, _, _))
- .WillOnce(DoAll(SetArgumentPointee<0>(kFakeHandle),
- Return(0)));
- EXPECT_CALL(mock_alsa_wrapper_,
- PcmSetParams(_, _, _, _, _, _,
- AlsaPcmOutputStream::kMinLatencyMicros))
- .WillOnce(Return(0));
- EXPECT_CALL(mock_alsa_wrapper_, PcmGetParams(_, _, _))
- .WillOnce(DoAll(SetArgumentPointee<1>(kTestFramesPerPacket),
- SetArgumentPointee<2>(kTestFramesPerPacket / 2),
- Return(0)));
-
- AlsaPcmOutputStream* test_stream = CreateStream(kTestChannelLayout,
- kPacketFramesInMinLatency);
- ASSERT_TRUE(test_stream->Open());
-
- // Now close it and test that everything was released.
- EXPECT_CALL(mock_alsa_wrapper_, PcmClose(kFakeHandle)).WillOnce(Return(0));
- EXPECT_CALL(mock_alsa_wrapper_, PcmName(kFakeHandle))
- .WillOnce(Return(kTestDeviceName));
- test_stream->Close();
-
- Mock::VerifyAndClear(&mock_alsa_wrapper_);
- Mock::VerifyAndClear(mock_manager_.get());
-
- // Test that having more packets ends up with a latency based on packet size.
- const int kOverMinLatencyPacketSize = kPacketFramesInMinLatency + 1;
- int64 expected_micros = 2 * AlsaPcmOutputStream::FramesToMicros(
- kOverMinLatencyPacketSize, kTestSampleRate);
-
- EXPECT_CALL(mock_alsa_wrapper_, PcmOpen(_, _, _, _))
- .WillOnce(DoAll(SetArgumentPointee<0>(kFakeHandle), Return(0)));
- EXPECT_CALL(mock_alsa_wrapper_,
- PcmSetParams(_, _, _, _, _, _, expected_micros))
- .WillOnce(Return(0));
- EXPECT_CALL(mock_alsa_wrapper_, PcmGetParams(_, _, _))
- .WillOnce(DoAll(SetArgumentPointee<1>(kTestFramesPerPacket),
- SetArgumentPointee<2>(kTestFramesPerPacket / 2),
- Return(0)));
-
- test_stream = CreateStream(kTestChannelLayout,
- kOverMinLatencyPacketSize);
- ASSERT_TRUE(test_stream->Open());
-
- // Now close it and test that everything was released.
- EXPECT_CALL(mock_alsa_wrapper_, PcmClose(kFakeHandle))
- .WillOnce(Return(0));
- EXPECT_CALL(mock_alsa_wrapper_, PcmName(kFakeHandle))
- .WillOnce(Return(kTestDeviceName));
- test_stream->Close();
-
- Mock::VerifyAndClear(&mock_alsa_wrapper_);
- Mock::VerifyAndClear(mock_manager_.get());
-}
-
-TEST_F(AlsaPcmOutputStreamTest, OpenClose) {
- int64 expected_micros = 2 *
- AlsaPcmOutputStream::FramesToMicros(kTestPacketSize / kTestBytesPerFrame,
- kTestSampleRate);
-
- // Open() call opens the playback device, sets the parameters, posts a task
- // with the resulting configuration data, and transitions the object state to
- // kIsOpened.
- EXPECT_CALL(mock_alsa_wrapper_,
- PcmOpen(_, StrEq(kTestDeviceName),
- SND_PCM_STREAM_PLAYBACK, SND_PCM_NONBLOCK))
- .WillOnce(DoAll(SetArgumentPointee<0>(kFakeHandle),
- Return(0)));
- EXPECT_CALL(mock_alsa_wrapper_,
- PcmSetParams(kFakeHandle,
- SND_PCM_FORMAT_U8,
- SND_PCM_ACCESS_RW_INTERLEAVED,
- ChannelLayoutToChannelCount(kTestChannelLayout),
- kTestSampleRate,
- 1,
- expected_micros))
- .WillOnce(Return(0));
- EXPECT_CALL(mock_alsa_wrapper_, PcmGetParams(kFakeHandle, _, _))
- .WillOnce(DoAll(SetArgumentPointee<1>(kTestFramesPerPacket),
- SetArgumentPointee<2>(kTestFramesPerPacket / 2),
- Return(0)));
-
- // Open the stream.
- AlsaPcmOutputStream* test_stream = CreateStream(kTestChannelLayout);
- ASSERT_TRUE(test_stream->Open());
-
- EXPECT_EQ(AlsaPcmOutputStream::kIsOpened, test_stream->state());
- EXPECT_EQ(kFakeHandle, test_stream->playback_handle_);
- EXPECT_EQ(kTestFramesPerPacket, test_stream->frames_per_packet_);
- EXPECT_TRUE(test_stream->buffer_.get());
- EXPECT_FALSE(test_stream->stop_stream_);
-
- // Now close it and test that everything was released.
- EXPECT_CALL(mock_alsa_wrapper_, PcmClose(kFakeHandle))
- .WillOnce(Return(0));
- EXPECT_CALL(mock_alsa_wrapper_, PcmName(kFakeHandle))
- .WillOnce(Return(kTestDeviceName));
- test_stream->Close();
-}
-
-TEST_F(AlsaPcmOutputStreamTest, PcmOpenFailed) {
- EXPECT_CALL(mock_alsa_wrapper_, PcmOpen(_, _, _, _))
- .WillOnce(Return(kTestFailedErrno));
- EXPECT_CALL(mock_alsa_wrapper_, StrError(kTestFailedErrno))
- .WillOnce(Return(kDummyMessage));
-
- AlsaPcmOutputStream* test_stream = CreateStream(kTestChannelLayout);
- ASSERT_FALSE(test_stream->Open());
- ASSERT_EQ(AlsaPcmOutputStream::kInError, test_stream->state());
-
- // Ensure internal state is set for a no-op stream if PcmOpen() fails.
- EXPECT_TRUE(test_stream->stop_stream_);
- EXPECT_TRUE(test_stream->playback_handle_ == NULL);
- EXPECT_FALSE(test_stream->buffer_.get());
-
- // Close the stream, since we opened it, to make destruction happy.
- test_stream->Close();
-}
-
-TEST_F(AlsaPcmOutputStreamTest, PcmSetParamsFailed) {
- EXPECT_CALL(mock_alsa_wrapper_, PcmOpen(_, _, _, _))
- .WillOnce(DoAll(SetArgumentPointee<0>(kFakeHandle),
- Return(0)));
- EXPECT_CALL(mock_alsa_wrapper_, PcmSetParams(_, _, _, _, _, _, _))
- .WillOnce(Return(kTestFailedErrno));
- EXPECT_CALL(mock_alsa_wrapper_, PcmClose(kFakeHandle))
- .WillOnce(Return(0));
- EXPECT_CALL(mock_alsa_wrapper_, PcmName(kFakeHandle))
- .WillOnce(Return(kTestDeviceName));
- EXPECT_CALL(mock_alsa_wrapper_, StrError(kTestFailedErrno))
- .WillOnce(Return(kDummyMessage));
-
- // If PcmSetParams() fails, Open() fails and the stream transitions to
- // kInError.
- AlsaPcmOutputStream* test_stream = CreateStream(kTestChannelLayout);
- ASSERT_FALSE(test_stream->Open());
- EXPECT_EQ(AlsaPcmOutputStream::kInError, test_stream->state());
-
- // Ensure internal state is set for a no-op stream if PcmSetParams() fails.
- EXPECT_TRUE(test_stream->stop_stream_);
- EXPECT_TRUE(test_stream->playback_handle_ == NULL);
- EXPECT_FALSE(test_stream->buffer_.get());
-
- // Close the stream, since we opened it, to make destruction happy.
- test_stream->Close();
-}
-
-TEST_F(AlsaPcmOutputStreamTest, StartStop) {
- // Open() call opens the playback device, sets the parameters, posts a task
- // with the resulting configuration data, and transitions the object state to
- // kIsOpened.
- EXPECT_CALL(mock_alsa_wrapper_, PcmOpen(_, _, _, _))
- .WillOnce(DoAll(SetArgumentPointee<0>(kFakeHandle),
- Return(0)));
- EXPECT_CALL(mock_alsa_wrapper_, PcmSetParams(_, _, _, _, _, _, _))
- .WillOnce(Return(0));
- EXPECT_CALL(mock_alsa_wrapper_, PcmGetParams(_, _, _))
- .WillOnce(DoAll(SetArgumentPointee<1>(kTestFramesPerPacket),
- SetArgumentPointee<2>(kTestFramesPerPacket / 2),
- Return(0)));
-
- // Open the stream.
- AlsaPcmOutputStream* test_stream = CreateStream(kTestChannelLayout);
- ASSERT_TRUE(test_stream->Open());
-
- // Expect device setup.
- EXPECT_CALL(mock_alsa_wrapper_, PcmDrop(kFakeHandle))
- .WillOnce(Return(0));
- EXPECT_CALL(mock_alsa_wrapper_, PcmPrepare(kFakeHandle))
- .WillOnce(Return(0));
-
- // Expect the pre-roll.
- MockAudioSourceCallback mock_callback;
- EXPECT_CALL(mock_alsa_wrapper_, PcmState(kFakeHandle))
- .WillRepeatedly(Return(SND_PCM_STATE_RUNNING));
- EXPECT_CALL(mock_alsa_wrapper_, PcmDelay(kFakeHandle, _))
- .WillRepeatedly(DoAll(SetArgumentPointee<1>(0), Return(0)));
- EXPECT_CALL(mock_callback, OnMoreData(_, _))
- .WillRepeatedly(DoAll(ClearBuffer(), Return(kTestFramesPerPacket)));
- EXPECT_CALL(mock_alsa_wrapper_, PcmWritei(kFakeHandle, _, _))
- .WillRepeatedly(Return(kTestFramesPerPacket));
-
- // Expect scheduling.
- EXPECT_CALL(mock_alsa_wrapper_, PcmAvailUpdate(kFakeHandle))
- .Times(AtLeast(2))
- .WillRepeatedly(Return(kTestFramesPerPacket));
-
- test_stream->Start(&mock_callback);
- // Start() will issue a WriteTask() directly and then schedule the next one;
- // call Stop() immediately after to ensure we don't run the message loop
- // forever.
- test_stream->Stop();
- message_loop_.RunUntilIdle();
-
- EXPECT_CALL(mock_alsa_wrapper_, PcmClose(kFakeHandle))
- .WillOnce(Return(0));
- EXPECT_CALL(mock_alsa_wrapper_, PcmName(kFakeHandle))
- .WillOnce(Return(kTestDeviceName));
- test_stream->Close();
-}
-
-TEST_F(AlsaPcmOutputStreamTest, WritePacket_FinishedPacket) {
- AlsaPcmOutputStream* test_stream = CreateStream(kTestChannelLayout);
- InitBuffer(test_stream);
- test_stream->TransitionTo(AlsaPcmOutputStream::kIsOpened);
- test_stream->TransitionTo(AlsaPcmOutputStream::kIsPlaying);
-
- // Nothing should happen. Don't set any expectations; our strict mocks
- // should verify most of this.
-
- // Test empty buffer.
- test_stream->buffer_->Clear();
- test_stream->WritePacket();
- test_stream->Close();
-}
-
-TEST_F(AlsaPcmOutputStreamTest, WritePacket_NormalPacket) {
- // We need to open the stream before writing data to ALSA.
- EXPECT_CALL(mock_alsa_wrapper_, PcmOpen(_, _, _, _))
- .WillOnce(DoAll(SetArgumentPointee<0>(kFakeHandle),
- Return(0)));
- EXPECT_CALL(mock_alsa_wrapper_, PcmSetParams(_, _, _, _, _, _, _))
- .WillOnce(Return(0));
- EXPECT_CALL(mock_alsa_wrapper_, PcmGetParams(_, _, _))
- .WillOnce(DoAll(SetArgumentPointee<1>(kTestFramesPerPacket),
- SetArgumentPointee<2>(kTestFramesPerPacket / 2),
- Return(0)));
- AlsaPcmOutputStream* test_stream = CreateStream(kTestChannelLayout);
- ASSERT_TRUE(test_stream->Open());
- InitBuffer(test_stream);
- test_stream->TransitionTo(AlsaPcmOutputStream::kIsPlaying);
-
- // Write a little less than half the data.
- int written = packet_->GetDataSize() / kTestBytesPerFrame / 2 - 1;
- EXPECT_CALL(mock_alsa_wrapper_, PcmAvailUpdate(kFakeHandle))
- .WillOnce(Return(written));
- EXPECT_CALL(mock_alsa_wrapper_, PcmWritei(kFakeHandle, packet_->GetData(), _))
- .WillOnce(Return(written));
-
- test_stream->WritePacket();
-
- ASSERT_EQ(test_stream->buffer_->forward_bytes(),
- packet_->GetDataSize() - written * kTestBytesPerFrame);
-
- // Write the rest.
- EXPECT_CALL(mock_alsa_wrapper_, PcmAvailUpdate(kFakeHandle))
- .WillOnce(Return(kTestFramesPerPacket - written));
- EXPECT_CALL(mock_alsa_wrapper_,
- PcmWritei(kFakeHandle,
- packet_->GetData() + written * kTestBytesPerFrame,
- _))
- .WillOnce(Return(packet_->GetDataSize() / kTestBytesPerFrame - written));
- test_stream->WritePacket();
- EXPECT_EQ(0, test_stream->buffer_->forward_bytes());
-
- // Now close it and test that everything was released.
- EXPECT_CALL(mock_alsa_wrapper_, PcmClose(kFakeHandle))
- .WillOnce(Return(0));
- EXPECT_CALL(mock_alsa_wrapper_, PcmName(kFakeHandle))
- .WillOnce(Return(kTestDeviceName));
- test_stream->Close();
-}
-
-TEST_F(AlsaPcmOutputStreamTest, WritePacket_WriteFails) {
- // We need to open the stream before writing data to ALSA.
- EXPECT_CALL(mock_alsa_wrapper_, PcmOpen(_, _, _, _))
- .WillOnce(DoAll(SetArgumentPointee<0>(kFakeHandle),
- Return(0)));
- EXPECT_CALL(mock_alsa_wrapper_, PcmSetParams(_, _, _, _, _, _, _))
- .WillOnce(Return(0));
- EXPECT_CALL(mock_alsa_wrapper_, PcmGetParams(_, _, _))
- .WillOnce(DoAll(SetArgumentPointee<1>(kTestFramesPerPacket),
- SetArgumentPointee<2>(kTestFramesPerPacket / 2),
- Return(0)));
- AlsaPcmOutputStream* test_stream = CreateStream(kTestChannelLayout);
- ASSERT_TRUE(test_stream->Open());
- InitBuffer(test_stream);
- test_stream->TransitionTo(AlsaPcmOutputStream::kIsPlaying);
-
- // Fail due to a recoverable error and see that PcmRecover code path
- // continues normally.
- EXPECT_CALL(mock_alsa_wrapper_, PcmAvailUpdate(kFakeHandle))
- .WillOnce(Return(kTestFramesPerPacket));
- EXPECT_CALL(mock_alsa_wrapper_, PcmWritei(kFakeHandle, _, _))
- .WillOnce(Return(-EINTR));
- EXPECT_CALL(mock_alsa_wrapper_, PcmRecover(kFakeHandle, _, _))
- .WillOnce(Return(0));
-
- test_stream->WritePacket();
-
- ASSERT_EQ(test_stream->buffer_->forward_bytes(), packet_->GetDataSize());
-
- // Fail the next write, and see that stop_stream_ is set.
- EXPECT_CALL(mock_alsa_wrapper_, PcmAvailUpdate(kFakeHandle))
- .WillOnce(Return(kTestFramesPerPacket));
- EXPECT_CALL(mock_alsa_wrapper_, PcmWritei(kFakeHandle, _, _))
- .WillOnce(Return(kTestFailedErrno));
- EXPECT_CALL(mock_alsa_wrapper_, PcmRecover(kFakeHandle, _, _))
- .WillOnce(Return(kTestFailedErrno));
- EXPECT_CALL(mock_alsa_wrapper_, StrError(kTestFailedErrno))
- .WillOnce(Return(kDummyMessage));
- test_stream->WritePacket();
- EXPECT_EQ(test_stream->buffer_->forward_bytes(), packet_->GetDataSize());
- EXPECT_TRUE(test_stream->stop_stream_);
-
- // Now close it and test that everything was released.
- EXPECT_CALL(mock_alsa_wrapper_, PcmClose(kFakeHandle))
- .WillOnce(Return(0));
- EXPECT_CALL(mock_alsa_wrapper_, PcmName(kFakeHandle))
- .WillOnce(Return(kTestDeviceName));
- test_stream->Close();
-}
-
-TEST_F(AlsaPcmOutputStreamTest, WritePacket_StopStream) {
- AlsaPcmOutputStream* test_stream = CreateStream(kTestChannelLayout);
- InitBuffer(test_stream);
- test_stream->TransitionTo(AlsaPcmOutputStream::kIsOpened);
- test_stream->TransitionTo(AlsaPcmOutputStream::kIsPlaying);
-
- // No expectations set on the strict mock because nothing should be called.
- test_stream->stop_stream_ = true;
- test_stream->WritePacket();
- EXPECT_EQ(0, test_stream->buffer_->forward_bytes());
- test_stream->Close();
-}
-
-TEST_F(AlsaPcmOutputStreamTest, BufferPacket) {
- AlsaPcmOutputStream* test_stream = CreateStream(kTestChannelLayout);
- InitBuffer(test_stream);
- test_stream->buffer_->Clear();
-
- MockAudioSourceCallback mock_callback;
- EXPECT_CALL(mock_alsa_wrapper_, PcmState(_))
- .WillOnce(Return(SND_PCM_STATE_RUNNING));
- EXPECT_CALL(mock_alsa_wrapper_, PcmDelay(_, _))
- .WillOnce(DoAll(SetArgumentPointee<1>(1), Return(0)));
- EXPECT_CALL(mock_alsa_wrapper_, PcmAvailUpdate(_))
- .WillRepeatedly(Return(0)); // Buffer is full.
-
- // Return a partially filled packet.
- EXPECT_CALL(mock_callback, OnMoreData(_, _))
- .WillOnce(DoAll(ClearBuffer(), Return(kTestFramesPerPacket / 2)));
-
- bool source_exhausted;
- test_stream->set_source_callback(&mock_callback);
- test_stream->packet_size_ = kTestPacketSize;
- test_stream->BufferPacket(&source_exhausted);
-
- EXPECT_EQ(kTestPacketSize / 2, test_stream->buffer_->forward_bytes());
- EXPECT_FALSE(source_exhausted);
- test_stream->Close();
-}
-
-TEST_F(AlsaPcmOutputStreamTest, BufferPacket_Negative) {
- AlsaPcmOutputStream* test_stream = CreateStream(kTestChannelLayout);
- InitBuffer(test_stream);
- test_stream->buffer_->Clear();
-
- // Simulate an underrun that has occurred right after checking the delay.
- MockAudioSourceCallback mock_callback;
- EXPECT_CALL(mock_alsa_wrapper_, PcmState(_))
- .WillOnce(Return(SND_PCM_STATE_RUNNING));
- EXPECT_CALL(mock_alsa_wrapper_, PcmDelay(_, _))
- .WillOnce(DoAll(SetArgumentPointee<1>(-1), Return(0)));
- EXPECT_CALL(mock_alsa_wrapper_, PcmAvailUpdate(_))
- .WillRepeatedly(Return(0)); // Buffer is full.
- EXPECT_CALL(mock_callback, OnMoreData(_, _))
- .WillOnce(DoAll(ClearBuffer(), Return(kTestFramesPerPacket / 2)));
-
- bool source_exhausted;
- test_stream->set_source_callback(&mock_callback);
- test_stream->packet_size_ = kTestPacketSize;
- test_stream->BufferPacket(&source_exhausted);
-
- EXPECT_EQ(kTestPacketSize / 2, test_stream->buffer_->forward_bytes());
- EXPECT_FALSE(source_exhausted);
- test_stream->Close();
-}
-
-TEST_F(AlsaPcmOutputStreamTest, BufferPacket_Underrun) {
- AlsaPcmOutputStream* test_stream = CreateStream(kTestChannelLayout);
- InitBuffer(test_stream);
- test_stream->buffer_->Clear();
-
- // If ALSA has underrun then we should assume a delay of zero.
- MockAudioSourceCallback mock_callback;
- EXPECT_CALL(mock_alsa_wrapper_, PcmState(_))
- .WillOnce(Return(SND_PCM_STATE_XRUN));
- EXPECT_CALL(mock_alsa_wrapper_, PcmAvailUpdate(_))
- .WillRepeatedly(Return(0)); // Buffer is full.
- EXPECT_CALL(mock_callback,
- OnMoreData(_, AllOf(
- Field(&AudioBuffersState::pending_bytes, 0),
- Field(&AudioBuffersState::hardware_delay_bytes, 0))))
- .WillOnce(DoAll(ClearBuffer(), Return(kTestFramesPerPacket / 2)));
-
- bool source_exhausted;
- test_stream->set_source_callback(&mock_callback);
- test_stream->packet_size_ = kTestPacketSize;
- test_stream->BufferPacket(&source_exhausted);
-
- EXPECT_EQ(kTestPacketSize / 2, test_stream->buffer_->forward_bytes());
- EXPECT_FALSE(source_exhausted);
- test_stream->Close();
-}
-
-TEST_F(AlsaPcmOutputStreamTest, BufferPacket_FullBuffer) {
- AlsaPcmOutputStream* test_stream = CreateStream(kTestChannelLayout);
- InitBuffer(test_stream);
- // No expectations set on the strict mock because nothing should be called.
- bool source_exhausted;
- test_stream->packet_size_ = kTestPacketSize;
- test_stream->BufferPacket(&source_exhausted);
- EXPECT_EQ(kTestPacketSize, test_stream->buffer_->forward_bytes());
- EXPECT_FALSE(source_exhausted);
- test_stream->Close();
-}
-
-TEST_F(AlsaPcmOutputStreamTest, AutoSelectDevice_DeviceSelect) {
- // Try channels from 1 -> 9 and see that we get the more specific surroundXX
- // device opened for channels 4-8. For all other channels, the device should
- // default to |AlsaPcmOutputStream::kDefaultDevice|. We should also not
- // downmix any channel in this case because downmixing is only defined for
- // channels 4-8, which we are guaranteeing to work.
- //
- // Note that the loop starts at "1", so the first parameter is ignored in
- // these arrays.
- const char* kExpectedDeviceName[] = { NULL,
- AlsaPcmOutputStream::kDefaultDevice,
- AlsaPcmOutputStream::kDefaultDevice,
- AlsaPcmOutputStream::kDefaultDevice,
- kSurround40, kSurround50, kSurround51,
- kSurround70, kSurround71,
- AlsaPcmOutputStream::kDefaultDevice };
- bool kExpectedDownmix[] = { false, false, false, false, false, true,
- false, false, false, false };
- ChannelLayout kExpectedLayouts[] = { CHANNEL_LAYOUT_NONE,
- CHANNEL_LAYOUT_MONO,
- CHANNEL_LAYOUT_STEREO,
- CHANNEL_LAYOUT_SURROUND,
- CHANNEL_LAYOUT_4_0,
- CHANNEL_LAYOUT_5_0,
- CHANNEL_LAYOUT_5_1,
- CHANNEL_LAYOUT_7_0,
- CHANNEL_LAYOUT_7_1 };
-
-
- for (int i = 1; i < 9; ++i) {
- if (i == 3 || i == 4 || i == 5) // invalid number of channels
- continue;
- SCOPED_TRACE(base::StringPrintf("Attempting %d Channel", i));
-
- // Hints will only be grabbed for channel numbers that have non-default
- // devices associated with them.
- if (kExpectedDeviceName[i] != AlsaPcmOutputStream::kDefaultDevice) {
- // The DeviceNameHint and DeviceNameFreeHint need to be paired to avoid a
- // memory leak.
- EXPECT_CALL(mock_alsa_wrapper_, DeviceNameHint(_, _, _))
- .WillOnce(DoAll(SetArgumentPointee<2>(&kFakeHints[0]), Return(0)));
- EXPECT_CALL(mock_alsa_wrapper_, DeviceNameFreeHint(&kFakeHints[0]))
- .Times(1);
- }
-
- EXPECT_CALL(mock_alsa_wrapper_,
- PcmOpen(_, StrEq(kExpectedDeviceName[i]), _, _))
- .WillOnce(DoAll(SetArgumentPointee<0>(kFakeHandle), Return(0)));
- EXPECT_CALL(mock_alsa_wrapper_,
- PcmSetParams(kFakeHandle, _, _, i, _, _, _))
- .WillOnce(Return(0));
-
- // The parameters are specified by the ALSA documentation and are defined as
- // constants in the implementation files.
- EXPECT_CALL(mock_alsa_wrapper_, DeviceNameGetHint(_, StrEq("IOID")))
- .WillRepeatedly(Invoke(OutputHint));
- EXPECT_CALL(mock_alsa_wrapper_, DeviceNameGetHint(_, StrEq("NAME")))
- .WillRepeatedly(Invoke(EchoHint));
-
- AlsaPcmOutputStream* test_stream = CreateStream(kExpectedLayouts[i]);
- EXPECT_TRUE(test_stream->AutoSelectDevice(i));
- EXPECT_EQ(kExpectedDownmix[i],
- static_cast<bool>(test_stream->channel_mixer_));
-
- Mock::VerifyAndClearExpectations(&mock_alsa_wrapper_);
- Mock::VerifyAndClearExpectations(mock_manager_.get());
- test_stream->Close();
- }
-}
-
-TEST_F(AlsaPcmOutputStreamTest, AutoSelectDevice_FallbackDevices) {
- using std::string;
-
- // If there are problems opening a multi-channel device, the fallback
- // operations should be as follows. Assume the multi-channel device name is
- // surround50:
- //
- // 1) Try open "surround50"
- // 2) Try open "plug:surround50".
- // 3) Try open "default".
- // 4) Try open "plug:default".
- // 5) Give up trying to open.
- //
- const string first_try = kSurround50;
- const string second_try = string(AlsaPcmOutputStream::kPlugPrefix) +
- kSurround50;
- const string third_try = AlsaPcmOutputStream::kDefaultDevice;
- const string fourth_try = string(AlsaPcmOutputStream::kPlugPrefix) +
- AlsaPcmOutputStream::kDefaultDevice;
-
- EXPECT_CALL(mock_alsa_wrapper_, DeviceNameHint(_, _, _))
- .WillOnce(DoAll(SetArgumentPointee<2>(&kFakeHints[0]), Return(0)));
- EXPECT_CALL(mock_alsa_wrapper_, DeviceNameFreeHint(&kFakeHints[0]))
- .Times(1);
- EXPECT_CALL(mock_alsa_wrapper_, DeviceNameGetHint(_, StrEq("IOID")))
- .WillRepeatedly(Invoke(OutputHint));
- EXPECT_CALL(mock_alsa_wrapper_, DeviceNameGetHint(_, StrEq("NAME")))
- .WillRepeatedly(Invoke(EchoHint));
- EXPECT_CALL(mock_alsa_wrapper_, StrError(kTestFailedErrno))
- .WillRepeatedly(Return(kDummyMessage));
-
- InSequence s;
- EXPECT_CALL(mock_alsa_wrapper_, PcmOpen(_, StrEq(first_try.c_str()), _, _))
- .WillOnce(Return(kTestFailedErrno));
- EXPECT_CALL(mock_alsa_wrapper_, PcmOpen(_, StrEq(second_try.c_str()), _, _))
- .WillOnce(Return(kTestFailedErrno));
- EXPECT_CALL(mock_alsa_wrapper_, PcmOpen(_, StrEq(third_try.c_str()), _, _))
- .WillOnce(Return(kTestFailedErrno));
- EXPECT_CALL(mock_alsa_wrapper_, PcmOpen(_, StrEq(fourth_try.c_str()), _, _))
- .WillOnce(Return(kTestFailedErrno));
-
- AlsaPcmOutputStream* test_stream = CreateStream(CHANNEL_LAYOUT_5_0);
- EXPECT_FALSE(test_stream->AutoSelectDevice(5));
- test_stream->Close();
-}
-
-TEST_F(AlsaPcmOutputStreamTest, AutoSelectDevice_HintFail) {
- // Should get |kDefaultDevice|, and force a 2-channel downmix on a failure to
- // enumerate devices.
- EXPECT_CALL(mock_alsa_wrapper_, DeviceNameHint(_, _, _))
- .WillRepeatedly(Return(kTestFailedErrno));
- EXPECT_CALL(mock_alsa_wrapper_,
- PcmOpen(_, StrEq(AlsaPcmOutputStream::kDefaultDevice), _, _))
- .WillOnce(DoAll(SetArgumentPointee<0>(kFakeHandle), Return(0)));
- EXPECT_CALL(mock_alsa_wrapper_,
- PcmSetParams(kFakeHandle, _, _, 2, _, _, _))
- .WillOnce(Return(0));
- EXPECT_CALL(mock_alsa_wrapper_, StrError(kTestFailedErrno))
- .WillOnce(Return(kDummyMessage));
-
- AlsaPcmOutputStream* test_stream = CreateStream(CHANNEL_LAYOUT_5_0);
- EXPECT_TRUE(test_stream->AutoSelectDevice(5));
- EXPECT_TRUE(test_stream->channel_mixer_);
- test_stream->Close();
-}
-
-TEST_F(AlsaPcmOutputStreamTest, BufferPacket_StopStream) {
- AlsaPcmOutputStream* test_stream = CreateStream(kTestChannelLayout);
- InitBuffer(test_stream);
- test_stream->stop_stream_ = true;
- bool source_exhausted;
- test_stream->BufferPacket(&source_exhausted);
- EXPECT_EQ(0, test_stream->buffer_->forward_bytes());
- EXPECT_TRUE(source_exhausted);
- test_stream->Close();
-}
-
-TEST_F(AlsaPcmOutputStreamTest, ScheduleNextWrite) {
- AlsaPcmOutputStream* test_stream = CreateStream(kTestChannelLayout);
- test_stream->TransitionTo(AlsaPcmOutputStream::kIsOpened);
- test_stream->TransitionTo(AlsaPcmOutputStream::kIsPlaying);
- InitBuffer(test_stream);
- DVLOG(1) << test_stream->state();
- EXPECT_CALL(mock_alsa_wrapper_, PcmAvailUpdate(_))
- .WillOnce(Return(10));
- test_stream->ScheduleNextWrite(false);
- DVLOG(1) << test_stream->state();
- // TODO(sergeyu): Figure out how to check that the task has been added to the
- // message loop.
-
- // Clean up the message queue. Currently ~MessageQueue() doesn't free pending
- // tasks unless running on valgrind. The code below is needed to keep
- // heapcheck happy.
-
- test_stream->stop_stream_ = true;
- DVLOG(1) << test_stream->state();
- test_stream->TransitionTo(AlsaPcmOutputStream::kIsClosed);
- DVLOG(1) << test_stream->state();
- test_stream->Close();
-}
-
-TEST_F(AlsaPcmOutputStreamTest, ScheduleNextWrite_StopStream) {
- AlsaPcmOutputStream* test_stream = CreateStream(kTestChannelLayout);
- test_stream->TransitionTo(AlsaPcmOutputStream::kIsOpened);
- test_stream->TransitionTo(AlsaPcmOutputStream::kIsPlaying);
-
- InitBuffer(test_stream);
-
- test_stream->stop_stream_ = true;
- test_stream->ScheduleNextWrite(true);
-
- // TODO(ajwong): Find a way to test whether or not another task has been
- // posted so we can verify that the Alsa code will indeed break the task
- // posting loop.
-
- test_stream->TransitionTo(AlsaPcmOutputStream::kIsClosed);
- test_stream->Close();
-}
-
-} // namespace media
diff --git a/src/media/audio/linux/alsa_util.cc b/src/media/audio/linux/alsa_util.cc
deleted file mode 100644
index 176ef69..0000000
--- a/src/media/audio/linux/alsa_util.cc
+++ /dev/null
@@ -1,200 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "media/audio/linux/alsa_util.h"
-
-#include <string>
-
-#include "base/logging.h"
-#include "media/audio/linux/alsa_wrapper.h"
-
-namespace alsa_util {
-
-static snd_pcm_t* OpenDevice(media::AlsaWrapper* wrapper,
- const char* device_name,
- snd_pcm_stream_t type,
- int channels,
- int sample_rate,
- snd_pcm_format_t pcm_format,
- int latency_us) {
- snd_pcm_t* handle = NULL;
- int error = wrapper->PcmOpen(&handle, device_name, type, SND_PCM_NONBLOCK);
- if (error < 0) {
- LOG(WARNING) << "PcmOpen: " << device_name << ","
- << wrapper->StrError(error);
- return NULL;
- }
-
- error = wrapper->PcmSetParams(handle, pcm_format,
- SND_PCM_ACCESS_RW_INTERLEAVED, channels,
- sample_rate, 1, latency_us);
- if (error < 0) {
- LOG(WARNING) << "PcmSetParams: " << device_name << ", "
- << wrapper->StrError(error) << " - Format: " << pcm_format
- << " Channels: " << channels << " Latency: " << latency_us;
- if (alsa_util::CloseDevice(wrapper, handle) < 0) {
- // TODO(ajwong): Retry on certain errors?
- LOG(WARNING) << "Unable to close audio device. Leaking handle.";
- }
- return NULL;
- }
-
- return handle;
-}
-
-static std::string DeviceNameToControlName(const std::string& device_name) {
- const char kMixerPrefix[] = "hw";
- std::string control_name;
- size_t pos1 = device_name.find(':');
- if (pos1 == std::string::npos) {
- control_name = device_name;
- } else {
- // Examples:
- // deviceName: "front:CARD=Intel,DEV=0", controlName: "hw:CARD=Intel".
- // deviceName: "default:CARD=Intel", controlName: "CARD=Intel".
- size_t pos2 = device_name.find(',');
- control_name = (pos2 == std::string::npos) ?
- device_name.substr(pos1) :
- kMixerPrefix + device_name.substr(pos1, pos2 - pos1);
- }
-
- return control_name;
-}
-
-snd_pcm_format_t BitsToFormat(int bits_per_sample) {
- switch (bits_per_sample) {
- case 8:
- return SND_PCM_FORMAT_U8;
-
- case 16:
- return SND_PCM_FORMAT_S16;
-
- case 24:
- return SND_PCM_FORMAT_S24;
-
- case 32:
- return SND_PCM_FORMAT_S32;
-
- default:
- return SND_PCM_FORMAT_UNKNOWN;
- }
-}
-
-int CloseDevice(media::AlsaWrapper* wrapper, snd_pcm_t* handle) {
- std::string device_name = wrapper->PcmName(handle);
- int error = wrapper->PcmClose(handle);
- if (error < 0) {
- LOG(ERROR) << "PcmClose: " << device_name << ", "
- << wrapper->StrError(error);
- }
-
- return error;
-}
-
-snd_pcm_t* OpenCaptureDevice(media::AlsaWrapper* wrapper,
- const char* device_name,
- int channels,
- int sample_rate,
- snd_pcm_format_t pcm_format,
- int latency_us) {
- return OpenDevice(wrapper, device_name, SND_PCM_STREAM_CAPTURE, channels,
- sample_rate, pcm_format, latency_us);
-}
-
-snd_pcm_t* OpenPlaybackDevice(media::AlsaWrapper* wrapper,
- const char* device_name,
- int channels,
- int sample_rate,
- snd_pcm_format_t pcm_format,
- int latency_us) {
- return OpenDevice(wrapper, device_name, SND_PCM_STREAM_PLAYBACK, channels,
- sample_rate, pcm_format, latency_us);
-}
-
-snd_mixer_t* OpenMixer(media::AlsaWrapper* wrapper,
- const std::string& device_name) {
- snd_mixer_t* mixer = NULL;
-
- int error = wrapper->MixerOpen(&mixer, 0);
- if (error < 0) {
- LOG(ERROR) << "MixerOpen: " << device_name << ", "
- << wrapper->StrError(error);
- return NULL;
- }
-
- std::string control_name = DeviceNameToControlName(device_name);
- error = wrapper->MixerAttach(mixer, control_name.c_str());
- if (error < 0) {
- LOG(ERROR) << "MixerAttach, " << control_name << ", "
- << wrapper->StrError(error);
- alsa_util::CloseMixer(wrapper, mixer, device_name);
- return NULL;
- }
-
- error = wrapper->MixerElementRegister(mixer, NULL, NULL);
- if (error < 0) {
- LOG(ERROR) << "MixerElementRegister: " << control_name << ", "
- << wrapper->StrError(error);
- alsa_util::CloseMixer(wrapper, mixer, device_name);
- return NULL;
- }
-
- return mixer;
-}
-
-void CloseMixer(media::AlsaWrapper* wrapper, snd_mixer_t* mixer,
- const std::string& device_name) {
- if (!mixer)
- return;
-
- wrapper->MixerFree(mixer);
-
- int error = 0;
- if (!device_name.empty()) {
- std::string control_name = DeviceNameToControlName(device_name);
- error = wrapper->MixerDetach(mixer, control_name.c_str());
- if (error < 0) {
- LOG(WARNING) << "MixerDetach: " << control_name << ", "
- << wrapper->StrError(error);
- }
- }
-
- error = wrapper->MixerClose(mixer);
- if (error < 0) {
- LOG(WARNING) << "MixerClose: " << wrapper->StrError(error);
- }
-}
-
-snd_mixer_elem_t* LoadCaptureMixerElement(media::AlsaWrapper* wrapper,
- snd_mixer_t* mixer) {
- if (!mixer)
- return NULL;
-
- int error = wrapper->MixerLoad(mixer);
- if (error < 0) {
- LOG(ERROR) << "MixerLoad: " << wrapper->StrError(error);
- return NULL;
- }
-
- snd_mixer_elem_t* elem = NULL;
- snd_mixer_elem_t* mic_elem = NULL;
- const char kCaptureElemName[] = "Capture";
- const char kMicElemName[] = "Mic";
- for (elem = wrapper->MixerFirstElem(mixer);
- elem;
- elem = wrapper->MixerNextElem(elem)) {
- if (wrapper->MixerSelemIsActive(elem)) {
- const char* elem_name = wrapper->MixerSelemName(elem);
- if (strcmp(elem_name, kCaptureElemName) == 0)
- return elem;
- else if (strcmp(elem_name, kMicElemName) == 0)
- mic_elem = elem;
- }
- }
-
- // Did not find any Capture handle; use the Mic handle.
- return mic_elem;
-}
-
-} // namespace alsa_util
diff --git a/src/media/audio/linux/alsa_util.h b/src/media/audio/linux/alsa_util.h
deleted file mode 100644
index 53cf80a..0000000
--- a/src/media/audio/linux/alsa_util.h
+++ /dev/null
@@ -1,47 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef MEDIA_AUDIO_LINUX_ALSA_UTIL_H_
-#define MEDIA_AUDIO_LINUX_ALSA_UTIL_H_
-
-#include <alsa/asoundlib.h>
-#include <string>
-
-namespace media {
-class AlsaWrapper;
-}
-
-namespace alsa_util {
-
-snd_pcm_format_t BitsToFormat(int bits_per_sample);
-
-snd_pcm_t* OpenCaptureDevice(media::AlsaWrapper* wrapper,
- const char* device_name,
- int channels,
- int sample_rate,
- snd_pcm_format_t pcm_format,
- int latency_us);
-
-snd_pcm_t* OpenPlaybackDevice(media::AlsaWrapper* wrapper,
- const char* device_name,
- int channels,
- int sample_rate,
- snd_pcm_format_t pcm_format,
- int latency_us);
-
-int CloseDevice(media::AlsaWrapper* wrapper, snd_pcm_t* handle);
-
-snd_mixer_t* OpenMixer(media::AlsaWrapper* wrapper,
- const std::string& device_name);
-
-void CloseMixer(media::AlsaWrapper* wrapper,
- snd_mixer_t* mixer,
- const std::string& device_name);
-
-snd_mixer_elem_t* LoadCaptureMixerElement(media::AlsaWrapper* wrapper,
- snd_mixer_t* mixer);
-
-} // namespace alsa_util
-
-#endif // MEDIA_AUDIO_LINUX_ALSA_UTIL_H_
diff --git a/src/media/audio/linux/alsa_wrapper.cc b/src/media/audio/linux/alsa_wrapper.cc
deleted file mode 100644
index c1ce359..0000000
--- a/src/media/audio/linux/alsa_wrapper.cc
+++ /dev/null
@@ -1,173 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "media/audio/linux/alsa_wrapper.h"
-
-#include <alsa/asoundlib.h>
-
-namespace media {
-
-AlsaWrapper::AlsaWrapper() {
-}
-
-AlsaWrapper::~AlsaWrapper() {
-}
-
-int AlsaWrapper::PcmOpen(snd_pcm_t** handle, const char* name,
- snd_pcm_stream_t stream, int mode) {
- return snd_pcm_open(handle, name, stream, mode);
-}
-
-int AlsaWrapper::DeviceNameHint(int card, const char* iface, void*** hints) {
- return snd_device_name_hint(card, iface, hints);
-}
-
-char* AlsaWrapper::DeviceNameGetHint(const void* hint, const char* id) {
- return snd_device_name_get_hint(hint, id);
-}
-
-int AlsaWrapper::DeviceNameFreeHint(void** hints) {
- return snd_device_name_free_hint(hints);
-}
-
-int AlsaWrapper::CardNext(int* rcard) {
- return snd_card_next(rcard);
-}
-
-int AlsaWrapper::PcmClose(snd_pcm_t* handle) {
- return snd_pcm_close(handle);
-}
-
-int AlsaWrapper::PcmPrepare(snd_pcm_t* handle) {
- return snd_pcm_prepare(handle);
-}
-
-int AlsaWrapper::PcmDrop(snd_pcm_t* handle) {
- return snd_pcm_drop(handle);
-}
-
-int AlsaWrapper::PcmDelay(snd_pcm_t* handle, snd_pcm_sframes_t* delay) {
- return snd_pcm_delay(handle, delay);
-}
-
-snd_pcm_sframes_t AlsaWrapper::PcmWritei(snd_pcm_t* handle,
- const void* buffer,
- snd_pcm_uframes_t size) {
- return snd_pcm_writei(handle, buffer, size);
-}
-
-snd_pcm_sframes_t AlsaWrapper::PcmReadi(snd_pcm_t* handle,
- void* buffer,
- snd_pcm_uframes_t size) {
- return snd_pcm_readi(handle, buffer, size);
-}
-
-int AlsaWrapper::PcmRecover(snd_pcm_t* handle, int err, int silent) {
- return snd_pcm_recover(handle, err, silent);
-}
-
-const char* AlsaWrapper::PcmName(snd_pcm_t* handle) {
- return snd_pcm_name(handle);
-}
-
-int AlsaWrapper::PcmSetParams(snd_pcm_t* handle, snd_pcm_format_t format,
- snd_pcm_access_t access, unsigned int channels,
- unsigned int rate, int soft_resample,
- unsigned int latency) {
- return snd_pcm_set_params(handle,
- format,
- access,
- channels,
- rate,
- soft_resample,
- latency);
-}
-
-int AlsaWrapper::PcmGetParams(snd_pcm_t* handle, snd_pcm_uframes_t* buffer_size,
- snd_pcm_uframes_t* period_size) {
- return snd_pcm_get_params(handle, buffer_size, period_size);
-}
-
-snd_pcm_sframes_t AlsaWrapper::PcmAvailUpdate(snd_pcm_t* handle) {
- return snd_pcm_avail_update(handle);
-}
-
-snd_pcm_state_t AlsaWrapper::PcmState(snd_pcm_t* handle) {
- return snd_pcm_state(handle);
-}
-
-const char* AlsaWrapper::StrError(int errnum) {
- return snd_strerror(errnum);
-}
-
-int AlsaWrapper::PcmStart(snd_pcm_t* handle) {
- return snd_pcm_start(handle);
-}
-
-int AlsaWrapper::MixerOpen(snd_mixer_t** mixer, int mode) {
- return snd_mixer_open(mixer, mode);
-}
-
-int AlsaWrapper::MixerAttach(snd_mixer_t* mixer, const char* name) {
- return snd_mixer_attach(mixer, name);
-}
-
-int AlsaWrapper::MixerElementRegister(snd_mixer_t* mixer,
- struct snd_mixer_selem_regopt* options,
- snd_mixer_class_t** classp) {
- return snd_mixer_selem_register(mixer, options, classp);
-}
-
-void AlsaWrapper::MixerFree(snd_mixer_t* mixer) {
- snd_mixer_free(mixer);
-}
-
-int AlsaWrapper::MixerDetach(snd_mixer_t* mixer, const char* name) {
- return snd_mixer_detach(mixer, name);
-}
-
-int AlsaWrapper::MixerClose(snd_mixer_t* mixer) {
- return snd_mixer_close(mixer);
-}
-
-int AlsaWrapper::MixerLoad(snd_mixer_t* mixer) {
- return snd_mixer_load(mixer);
-}
-
-snd_mixer_elem_t* AlsaWrapper::MixerFirstElem(snd_mixer_t* mixer) {
- return snd_mixer_first_elem(mixer);
-}
-
-snd_mixer_elem_t* AlsaWrapper::MixerNextElem(snd_mixer_elem_t* elem) {
- return snd_mixer_elem_next(elem);
-}
-
-int AlsaWrapper::MixerSelemIsActive(snd_mixer_elem_t* elem) {
- return snd_mixer_selem_is_active(elem);
-}
-
-const char* AlsaWrapper::MixerSelemName(snd_mixer_elem_t* elem) {
- return snd_mixer_selem_get_name(elem);
-}
-
-int AlsaWrapper::MixerSelemSetCaptureVolumeAll(
- snd_mixer_elem_t* elem, long value) {
- return snd_mixer_selem_set_capture_volume_all(elem, value);
-}
-
-int AlsaWrapper::MixerSelemGetCaptureVolume(
- snd_mixer_elem_t* elem, snd_mixer_selem_channel_id_t channel, long* value) {
- return snd_mixer_selem_get_capture_volume(elem, channel, value);
-}
-
-int AlsaWrapper::MixerSelemHasCaptureVolume(snd_mixer_elem_t* elem) {
- return snd_mixer_selem_has_capture_volume(elem);
-}
-
-int AlsaWrapper::MixerSelemGetCaptureVolumeRange(snd_mixer_elem_t* elem,
- long* min, long* max) {
- return snd_mixer_selem_get_capture_volume_range(elem, min, max);
-}
-
-} // namespace media
diff --git a/src/media/audio/linux/alsa_wrapper.h b/src/media/audio/linux/alsa_wrapper.h
deleted file mode 100644
index 30d9463..0000000
--- a/src/media/audio/linux/alsa_wrapper.h
+++ /dev/null
@@ -1,81 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-//
-// AlsaWrapper is a simple stateless class that wraps the alsa library commands
-// we want to use. Its purpose is to allow injection of a mock so that the
-// higher level code is testable.
-
-#include <alsa/asoundlib.h>
-
-#include "base/basictypes.h"
-#include "media/base/media_export.h"
-
-namespace media {
-
-class MEDIA_EXPORT AlsaWrapper {
- public:
- AlsaWrapper();
- virtual ~AlsaWrapper();
-
- virtual int DeviceNameHint(int card, const char* iface, void*** hints);
- virtual char* DeviceNameGetHint(const void* hint, const char* id);
- virtual int DeviceNameFreeHint(void** hints);
- virtual int CardNext(int* rcard);
-
- virtual int PcmOpen(snd_pcm_t** handle, const char* name,
- snd_pcm_stream_t stream, int mode);
- virtual int PcmClose(snd_pcm_t* handle);
- virtual int PcmPrepare(snd_pcm_t* handle);
- virtual int PcmDrop(snd_pcm_t* handle);
- virtual int PcmDelay(snd_pcm_t* handle, snd_pcm_sframes_t* delay);
- virtual snd_pcm_sframes_t PcmWritei(snd_pcm_t* handle,
- const void* buffer,
- snd_pcm_uframes_t size);
- virtual snd_pcm_sframes_t PcmReadi(snd_pcm_t* handle,
- void* buffer,
- snd_pcm_uframes_t size);
- virtual int PcmRecover(snd_pcm_t* handle, int err, int silent);
- virtual int PcmSetParams(snd_pcm_t* handle, snd_pcm_format_t format,
- snd_pcm_access_t access, unsigned int channels,
- unsigned int rate, int soft_resample,
- unsigned int latency);
- virtual int PcmGetParams(snd_pcm_t* handle, snd_pcm_uframes_t* buffer_size,
- snd_pcm_uframes_t* period_size);
- virtual const char* PcmName(snd_pcm_t* handle);
- virtual snd_pcm_sframes_t PcmAvailUpdate(snd_pcm_t* handle);
- virtual snd_pcm_state_t PcmState(snd_pcm_t* handle);
- virtual int PcmStart(snd_pcm_t* handle);
-
- virtual int MixerOpen(snd_mixer_t** mixer, int mode);
- virtual int MixerAttach(snd_mixer_t* mixer, const char* name);
- virtual int MixerElementRegister(snd_mixer_t* mixer,
- struct snd_mixer_selem_regopt* options,
- snd_mixer_class_t** classp);
- virtual void MixerFree(snd_mixer_t* mixer);
- virtual int MixerDetach(snd_mixer_t* mixer, const char* name);
- virtual int MixerClose(snd_mixer_t* mixer);
- virtual int MixerLoad(snd_mixer_t* mixer);
- virtual snd_mixer_elem_t* MixerFirstElem(snd_mixer_t* mixer);
- virtual snd_mixer_elem_t* MixerNextElem(snd_mixer_elem_t* elem);
- virtual int MixerSelemIsActive(snd_mixer_elem_t* elem);
- virtual const char* MixerSelemName(snd_mixer_elem_t* elem);
- virtual int MixerSelemSetCaptureVolumeAll(snd_mixer_elem_t* elem, long value);
- virtual int MixerSelemGetCaptureVolume(snd_mixer_elem_t* elem,
- snd_mixer_selem_channel_id_t channel,
- long* value);
- virtual int MixerSelemHasCaptureVolume(snd_mixer_elem_t* elem);
- virtual int MixerSelemGetCaptureVolumeRange(snd_mixer_elem_t* elem,
- long* min, long* max);
-
- virtual const char* StrError(int errnum);
-
- private:
- int ConfigureHwParams(snd_pcm_t* handle, snd_pcm_hw_params_t* hw_params,
- snd_pcm_format_t format, snd_pcm_access_t access,
- unsigned int channels, unsigned int rate,
- int soft_resample, unsigned int latency);
- DISALLOW_COPY_AND_ASSIGN(AlsaWrapper);
-};
-
-} // namespace media
diff --git a/src/media/audio/linux/audio_manager_linux.cc b/src/media/audio/linux/audio_manager_linux.cc
deleted file mode 100644
index 48be0b7..0000000
--- a/src/media/audio/linux/audio_manager_linux.cc
+++ /dev/null
@@ -1,352 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "media/audio/linux/audio_manager_linux.h"
-
-#include "base/command_line.h"
-#include "base/environment.h"
-#include "base/logging.h"
-#include "base/nix/xdg_util.h"
-#include "base/process_util.h"
-#include "base/stl_util.h"
-#include "media/audio/audio_output_dispatcher.h"
-#include "media/audio/audio_util.h"
-#include "media/audio/linux/alsa_input.h"
-#include "media/audio/linux/alsa_output.h"
-#include "media/audio/linux/alsa_wrapper.h"
-#if defined(USE_PULSEAUDIO)
-#include "media/audio/pulse/pulse_output.h"
-#endif
-#if defined(USE_CRAS)
-#include "media/audio/linux/cras_input.h"
-#include "media/audio/linux/cras_output.h"
-#endif
-#include "media/base/limits.h"
-#include "media/base/media_switches.h"
-
-namespace media {
-
-// Maximum number of output streams that can be open simultaneously.
-static const int kMaxOutputStreams = 50;
-
-// Since "default", "pulse" and "dmix" devices are virtual devices mapped to
-// real devices, we remove them from the list to avoid duplicate counting.
-// In addition, note that we support no more than 2 channels for recording,
-// hence surround devices are not stored in the list.
-static const char* kInvalidAudioInputDevices[] = {
- "default",
- "null",
- "pulse",
- "dmix",
- "surround",
-};
-
-static const char kCrasAutomaticDeviceName[] = "Automatic";
-static const char kCrasAutomaticDeviceId[] = "automatic";
-
-// Implementation of AudioManager.
-bool AudioManagerLinux::HasAudioOutputDevices() {
- if (UseCras())
- return true;
-
- return HasAnyAlsaAudioDevice(kStreamPlayback);
-}
-
-bool AudioManagerLinux::HasAudioInputDevices() {
- if (UseCras())
- return true;
-
- return HasAnyAlsaAudioDevice(kStreamCapture);
-}
-
-AudioManagerLinux::AudioManagerLinux()
- : wrapper_(new AlsaWrapper()) {
- SetMaxOutputStreamsAllowed(kMaxOutputStreams);
-}
-
-AudioManagerLinux::~AudioManagerLinux() {
- Shutdown();
-}
-
-bool AudioManagerLinux::CanShowAudioInputSettings() {
- scoped_ptr<base::Environment> env(base::Environment::Create());
-
- switch (base::nix::GetDesktopEnvironment(env.get())) {
- case base::nix::DESKTOP_ENVIRONMENT_GNOME:
- case base::nix::DESKTOP_ENVIRONMENT_KDE3:
- case base::nix::DESKTOP_ENVIRONMENT_KDE4:
- return true;
- case base::nix::DESKTOP_ENVIRONMENT_OTHER:
- case base::nix::DESKTOP_ENVIRONMENT_UNITY:
- case base::nix::DESKTOP_ENVIRONMENT_XFCE:
- return false;
- }
- // Unless GetDesktopEnvironment() badly misbehaves, this should never happen.
- NOTREACHED();
- return false;
-}
-
-void AudioManagerLinux::ShowAudioInputSettings() {
- scoped_ptr<base::Environment> env(base::Environment::Create());
- base::nix::DesktopEnvironment desktop = base::nix::GetDesktopEnvironment(
- env.get());
- std::string command((desktop == base::nix::DESKTOP_ENVIRONMENT_GNOME) ?
- "gnome-volume-control" : "kmix");
- base::LaunchProcess(CommandLine(FilePath(command)), base::LaunchOptions(),
- NULL);
-}
-
-void AudioManagerLinux::GetAudioInputDeviceNames(
- media::AudioDeviceNames* device_names) {
- DCHECK(device_names->empty());
- if (UseCras()) {
- GetCrasAudioInputDevices(device_names);
- return;
- }
-
- GetAlsaAudioInputDevices(device_names);
-}
-
-bool AudioManagerLinux::UseCras() {
-#if defined(USE_CRAS)
- if (CommandLine::ForCurrentProcess()->HasSwitch(switches::kUseCras)) {
- return true;
- }
-#endif
- return false;
-}
-
-void AudioManagerLinux::GetCrasAudioInputDevices(
- media::AudioDeviceNames* device_names) {
- // Cras will route audio from a proper physical device automatically.
- device_names->push_back(media::AudioDeviceName(
- kCrasAutomaticDeviceName, kCrasAutomaticDeviceId));
-}
-
-void AudioManagerLinux::GetAlsaAudioInputDevices(
- media::AudioDeviceNames* device_names) {
- // Constants specified by the ALSA API for device hints.
- static const char kPcmInterfaceName[] = "pcm";
- int card = -1;
-
- // Loop through the sound cards to get ALSA device hints.
- while (!wrapper_->CardNext(&card) && card >= 0) {
- void** hints = NULL;
- int error = wrapper_->DeviceNameHint(card, kPcmInterfaceName, &hints);
- if (!error) {
- GetAlsaDevicesInfo(hints, device_names);
-
- // Destroy the hints now that we're done with it.
- wrapper_->DeviceNameFreeHint(hints);
- } else {
- DLOG(WARNING) << "GetAudioInputDevices: unable to get device hints: "
- << wrapper_->StrError(error);
- }
- }
-}
-
-void AudioManagerLinux::GetAlsaDevicesInfo(
- void** hints, media::AudioDeviceNames* device_names) {
- static const char kIoHintName[] = "IOID";
- static const char kNameHintName[] = "NAME";
- static const char kDescriptionHintName[] = "DESC";
- static const char kOutputDevice[] = "Output";
-
- for (void** hint_iter = hints; *hint_iter != NULL; hint_iter++) {
- // Only examine devices that are input capable. Valid values are
- // "Input", "Output", and NULL which means both input and output.
- scoped_ptr_malloc<char> io(wrapper_->DeviceNameGetHint(*hint_iter,
- kIoHintName));
- if (io != NULL && strcmp(kOutputDevice, io.get()) == 0)
- continue;
-
- // Found an input device; prepend the default device since we always want
- // it to be at the top of the list for all platforms. There is no
- // duplicate counting here since this is only done if the list is still empty.
- // Note, pulse has exclusively opened the default device, so we must open
- // the device via the "default" moniker.
- if (device_names->empty()) {
- device_names->push_front(media::AudioDeviceName(
- AudioManagerBase::kDefaultDeviceName,
- AudioManagerBase::kDefaultDeviceId));
- }
-
- // Get the unique device name for the device.
- scoped_ptr_malloc<char> unique_device_name(
- wrapper_->DeviceNameGetHint(*hint_iter, kNameHintName));
-
- // Find out if the device is available.
- if (IsAlsaDeviceAvailable(unique_device_name.get())) {
- // Get the description for the device.
- scoped_ptr_malloc<char> desc(wrapper_->DeviceNameGetHint(
- *hint_iter, kDescriptionHintName));
-
- media::AudioDeviceName name;
- name.unique_id = unique_device_name.get();
- if (desc.get()) {
- // Use the more user friendly description as name.
- // Replace '\n' with '-'.
- char* pret = strchr(desc.get(), '\n');
- if (pret)
- *pret = '-';
- name.device_name = desc.get();
- } else {
- // Virtual devices don't necessarily have descriptions.
- // Use their names instead.
- name.device_name = unique_device_name.get();
- }
-
- // Store the device information.
- device_names->push_back(name);
- }
- }
-}
-
-bool AudioManagerLinux::IsAlsaDeviceAvailable(const char* device_name) {
- if (!device_name)
- return false;
-
- // Check if the device is in the list of invalid devices.
- for (size_t i = 0; i < arraysize(kInvalidAudioInputDevices); ++i) {
- if (strncmp(kInvalidAudioInputDevices[i], device_name,
- strlen(kInvalidAudioInputDevices[i])) == 0)
- return false;
- }
-
- return true;
-}
-
-bool AudioManagerLinux::HasAnyAlsaAudioDevice(StreamType stream) {
- static const char kPcmInterfaceName[] = "pcm";
- static const char kIoHintName[] = "IOID";
- const char* kNotWantedDevice =
- (stream == kStreamPlayback ? "Input" : "Output");
- void** hints = NULL;
- bool has_device = false;
- int card = -1;
-
- // Loop through the sound cards.
- // Don't use snd_device_name_hint(-1,..) since there is an access violation
- // inside this ALSA API with libasound.so.2.0.0.
- while (!wrapper_->CardNext(&card) && (card >= 0) && !has_device) {
- int error = wrapper_->DeviceNameHint(card, kPcmInterfaceName, &hints);
- if (!error) {
- for (void** hint_iter = hints; *hint_iter != NULL; hint_iter++) {
- // Only examine devices that are |stream| capable. Valid values are
- // "Input", "Output", and NULL which means both input and output.
- scoped_ptr_malloc<char> io(wrapper_->DeviceNameGetHint(*hint_iter,
- kIoHintName));
- if (io != NULL && strcmp(kNotWantedDevice, io.get()) == 0)
- continue; // Wrong type, skip the device.
-
- // Found an input device.
- has_device = true;
- break;
- }
-
- // Destroy the hints now that we're done with it.
- wrapper_->DeviceNameFreeHint(hints);
- hints = NULL;
- } else {
- DLOG(WARNING) << "HasAnyAudioDevice: unable to get device hints: "
- << wrapper_->StrError(error);
- }
- }
-
- return has_device;
-}
-
-AudioOutputStream* AudioManagerLinux::MakeLinearOutputStream(
- const AudioParameters& params) {
- DCHECK_EQ(AudioParameters::AUDIO_PCM_LINEAR, params.format());
- return MakeOutputStream(params);
-}
-
-AudioOutputStream* AudioManagerLinux::MakeLowLatencyOutputStream(
- const AudioParameters& params) {
- DCHECK_EQ(AudioParameters::AUDIO_PCM_LOW_LATENCY, params.format());
- return MakeOutputStream(params);
-}
-
-AudioInputStream* AudioManagerLinux::MakeLinearInputStream(
- const AudioParameters& params, const std::string& device_id) {
- DCHECK_EQ(AudioParameters::AUDIO_PCM_LINEAR, params.format());
- return MakeInputStream(params, device_id);
-}
-
-AudioInputStream* AudioManagerLinux::MakeLowLatencyInputStream(
- const AudioParameters& params, const std::string& device_id) {
- DCHECK_EQ(AudioParameters::AUDIO_PCM_LOW_LATENCY, params.format());
- return MakeInputStream(params, device_id);
-}
-
-AudioOutputStream* AudioManagerLinux::MakeOutputStream(
- const AudioParameters& params) {
-#if defined(USE_CRAS)
- if (UseCras()) {
- return new CrasOutputStream(params, this);
- }
-#endif
-
-#if defined(USE_PULSEAUDIO)
- if (CommandLine::ForCurrentProcess()->HasSwitch(switches::kUsePulseAudio)) {
- return new PulseAudioOutputStream(params, this);
- }
-#endif
-
- std::string device_name = AlsaPcmOutputStream::kAutoSelectDevice;
- if (CommandLine::ForCurrentProcess()->HasSwitch(
- switches::kAlsaOutputDevice)) {
- device_name = CommandLine::ForCurrentProcess()->GetSwitchValueASCII(
- switches::kAlsaOutputDevice);
- }
- return new AlsaPcmOutputStream(device_name, params, wrapper_.get(), this);
-}
-
-AudioInputStream* AudioManagerLinux::MakeInputStream(
- const AudioParameters& params, const std::string& device_id) {
-#if defined(USE_CRAS)
- if (UseCras()) {
- return new CrasInputStream(params, this);
- }
-#endif
-
- std::string device_name = (device_id == AudioManagerBase::kDefaultDeviceId) ?
- AlsaPcmInputStream::kAutoSelectDevice : device_id;
- if (CommandLine::ForCurrentProcess()->HasSwitch(switches::kAlsaInputDevice)) {
- device_name = CommandLine::ForCurrentProcess()->GetSwitchValueASCII(
- switches::kAlsaInputDevice);
- }
-
- return new AlsaPcmInputStream(this, device_name, params, wrapper_.get());
-}
-
-AudioManager* CreateAudioManager() {
- return new AudioManagerLinux();
-}
-
-AudioParameters AudioManagerLinux::GetPreferredLowLatencyOutputStreamParameters(
- const AudioParameters& input_params) {
- // Since Linux doesn't actually have a low latency path the hardware buffer
- // size is quite large in order to prevent glitches with general usage. Some
- // clients, such as WebRTC, have a more limited use case and work acceptably
- // with a smaller buffer size. The check below allows clients which want to
- // try a smaller buffer size on Linux to do so.
- int buffer_size = GetAudioHardwareBufferSize();
- if (input_params.frames_per_buffer() < buffer_size)
- buffer_size = input_params.frames_per_buffer();
-
- int sample_rate = GetAudioHardwareSampleRate();
- // CRAS will sample rate convert if needed, so pass through input sample rate.
- if (UseCras())
- sample_rate = input_params.sample_rate();
-
- // TODO(dalecurtis): This should include bits per channel and channel layout
- // eventually.
- return AudioParameters(
- AudioParameters::AUDIO_PCM_LOW_LATENCY, input_params.channel_layout(),
- sample_rate, 16, buffer_size);
-}
-
-} // namespace media
diff --git a/src/media/audio/linux/audio_manager_linux.h b/src/media/audio/linux/audio_manager_linux.h
deleted file mode 100644
index 7aab32a..0000000
--- a/src/media/audio/linux/audio_manager_linux.h
+++ /dev/null
@@ -1,83 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef MEDIA_AUDIO_LINUX_AUDIO_MANAGER_LINUX_H_
-#define MEDIA_AUDIO_LINUX_AUDIO_MANAGER_LINUX_H_
-
-#include <string>
-#include "base/compiler_specific.h"
-#include "base/memory/ref_counted.h"
-#include "base/threading/thread.h"
-#include "media/audio/audio_manager_base.h"
-
-namespace media {
-
-class AlsaWrapper;
-
-class MEDIA_EXPORT AudioManagerLinux : public AudioManagerBase {
- public:
- AudioManagerLinux();
-
- // Implementation of AudioManager.
- virtual bool HasAudioOutputDevices() OVERRIDE;
- virtual bool HasAudioInputDevices() OVERRIDE;
- virtual bool CanShowAudioInputSettings() OVERRIDE;
- virtual void ShowAudioInputSettings() OVERRIDE;
- virtual void GetAudioInputDeviceNames(media::AudioDeviceNames* device_names)
- OVERRIDE;
-
- // Implementation of AudioManagerBase.
- virtual AudioOutputStream* MakeLinearOutputStream(
- const AudioParameters& params) OVERRIDE;
- virtual AudioOutputStream* MakeLowLatencyOutputStream(
- const AudioParameters& params) OVERRIDE;
- virtual AudioInputStream* MakeLinearInputStream(
- const AudioParameters& params, const std::string& device_id) OVERRIDE;
- virtual AudioInputStream* MakeLowLatencyInputStream(
- const AudioParameters& params, const std::string& device_id) OVERRIDE;
- virtual AudioParameters GetPreferredLowLatencyOutputStreamParameters(
- const AudioParameters& input_params) OVERRIDE;
-
- protected:
- virtual ~AudioManagerLinux();
-
- private:
- enum StreamType {
- kStreamPlayback = 0,
- kStreamCapture,
- };
-
- // Returns true if cras should be used for input/output.
- bool UseCras();
-
- // Gets a list of available cras input devices.
- void GetCrasAudioInputDevices(media::AudioDeviceNames* device_names);
-
- // Gets a list of available ALSA input devices.
- void GetAlsaAudioInputDevices(media::AudioDeviceNames* device_names);
-
- // Gets the ALSA devices' names and ids.
- void GetAlsaDevicesInfo(void** hint, media::AudioDeviceNames* device_names);
-
- // Checks if the specific ALSA device is available.
- bool IsAlsaDeviceAvailable(const char* device_name);
-
- // Returns true if a device is present for the given stream type.
- bool HasAnyAlsaAudioDevice(StreamType stream);
-
- // Called by MakeLinearOutputStream and MakeLowLatencyOutputStream.
- AudioOutputStream* MakeOutputStream(const AudioParameters& params);
-
- // Called by MakeLinearInputStream and MakeLowLatencyInputStream.
- AudioInputStream* MakeInputStream(const AudioParameters& params,
- const std::string& device_id);
-
- scoped_ptr<AlsaWrapper> wrapper_;
-
- DISALLOW_COPY_AND_ASSIGN(AudioManagerLinux);
-};
-
-} // namespace media
-
-#endif // MEDIA_AUDIO_LINUX_AUDIO_MANAGER_LINUX_H_
diff --git a/src/media/audio/linux/cras_input.cc b/src/media/audio/linux/cras_input.cc
deleted file mode 100644
index 6de405e..0000000
--- a/src/media/audio/linux/cras_input.cc
+++ /dev/null
@@ -1,278 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "media/audio/linux/cras_input.h"
-
-#include <math.h>
-
-#include "base/basictypes.h"
-#include "base/bind.h"
-#include "base/logging.h"
-#include "base/time.h"
-#include "media/audio/audio_manager.h"
-#include "media/audio/linux/alsa_util.h"
-#include "media/audio/linux/audio_manager_linux.h"
-
-namespace media {
-
-CrasInputStream::CrasInputStream(const AudioParameters& params,
- AudioManagerLinux* manager)
- : audio_manager_(manager),
- bytes_per_frame_(0),
- callback_(NULL),
- client_(NULL),
- params_(params),
- started_(false),
- stream_id_(0) {
- DCHECK(audio_manager_);
-}
-
-CrasInputStream::~CrasInputStream() {
- DCHECK(!client_);
-}
-
-bool CrasInputStream::Open() {
- if (client_) {
- NOTREACHED() << "CrasInputStream already open";
- return false; // Already open.
- }
-
- // Sanity check input values.
- if (params_.sample_rate() <= 0) {
- DLOG(WARNING) << "Unsupported audio frequency.";
- return false;
- }
-
- if (AudioParameters::AUDIO_PCM_LINEAR != params_.format() &&
- AudioParameters::AUDIO_PCM_LOW_LATENCY != params_.format()) {
- DLOG(WARNING) << "Unsupported audio format.";
- return false;
- }
-
- snd_pcm_format_t pcm_format =
- alsa_util::BitsToFormat(params_.bits_per_sample());
- if (pcm_format == SND_PCM_FORMAT_UNKNOWN) {
- DLOG(WARNING) << "Unsupported bits/sample: " << params_.bits_per_sample();
- return false;
- }
-
- // Create the client and connect to the CRAS server.
- if (cras_client_create(&client_) < 0) {
- DLOG(WARNING) << "Couldn't create CRAS client.\n";
- client_ = NULL;
- return false;
- }
-
- if (cras_client_connect(client_)) {
- DLOG(WARNING) << "Couldn't connect CRAS client.\n";
- cras_client_destroy(client_);
- client_ = NULL;
- return false;
- }
-
- // Then start running the client.
- if (cras_client_run_thread(client_)) {
- DLOG(WARNING) << "Couldn't run CRAS client.\n";
- cras_client_destroy(client_);
- client_ = NULL;
- return false;
- }
-
- return true;
-}
-
-void CrasInputStream::Close() {
- if (client_) {
- cras_client_stop(client_);
- cras_client_destroy(client_);
- client_ = NULL;
- }
-
- if (callback_) {
- callback_->OnClose(this);
- callback_ = NULL;
- }
-
- // Signal to the manager that we're closed and can be removed.
-  // Should be the last call in the method as it deletes "this".
- audio_manager_->ReleaseInputStream(this);
-}
-
-void CrasInputStream::Start(AudioInputCallback* callback) {
- DCHECK(client_);
- DCHECK(callback);
-
-  // If the stream has already been started, there is nothing to do.
- if (started_)
- return;
-
- callback_ = callback;
-
- // Prepare |audio_format| and |stream_params| for the stream we
- // will create.
- cras_audio_format* audio_format = cras_audio_format_create(
- alsa_util::BitsToFormat(params_.bits_per_sample()),
- params_.sample_rate(),
- params_.channels());
- if (!audio_format) {
- DLOG(WARNING) << "Error setting up audio parameters.";
- callback_->OnError(this, -ENOMEM);
- callback_ = NULL;
- return;
- }
-
- unsigned int frames_per_packet = params_.frames_per_buffer();
- cras_stream_params* stream_params = cras_client_stream_params_create(
- CRAS_STREAM_INPUT,
- frames_per_packet, // Total latency.
- frames_per_packet, // Call back when this many ready.
-      frames_per_packet,  // Minimum callback level; ignored for capture streams.
- CRAS_STREAM_TYPE_DEFAULT,
- 0, // Unused flags.
- this,
- CrasInputStream::SamplesReady,
- CrasInputStream::StreamError,
- audio_format);
- if (!stream_params) {
- DLOG(WARNING) << "Error setting up stream parameters.";
- callback_->OnError(this, -ENOMEM);
- callback_ = NULL;
- cras_audio_format_destroy(audio_format);
- return;
- }
-
- // Before starting the stream, save the number of bytes in a frame for use in
- // the callback.
- bytes_per_frame_ = cras_client_format_bytes_per_frame(audio_format);
-
- // Adding the stream will start the audio callbacks.
- if (cras_client_add_stream(client_, &stream_id_, stream_params) == 0) {
- audio_manager_->IncreaseActiveInputStreamCount();
- } else {
- DLOG(WARNING) << "Failed to add the stream.";
- callback_->OnError(this, -EIO);
- callback_ = NULL;
- }
-
- // Done with config params.
- cras_audio_format_destroy(audio_format);
- cras_client_stream_params_destroy(stream_params);
-
- started_ = true;
-}
-
-void CrasInputStream::Stop() {
- DCHECK(client_);
-
- if (!callback_ || !started_)
- return;
-
- // Removing the stream from the client stops audio.
- cras_client_rm_stream(client_, stream_id_);
-
- audio_manager_->DecreaseActiveInputStreamCount();
-
- started_ = false;
-}
-
-// Static callback asking for samples. Run on high priority thread.
-int CrasInputStream::SamplesReady(cras_client* client,
- cras_stream_id_t stream_id,
- uint8* samples,
- size_t frames,
- const timespec* sample_ts,
- void* arg) {
- CrasInputStream* me = static_cast<CrasInputStream*>(arg);
- me->ReadAudio(frames, samples, sample_ts);
- return frames;
-}
-
-// Static callback for stream errors.
-int CrasInputStream::StreamError(cras_client* client,
- cras_stream_id_t stream_id,
- int err,
- void* arg) {
- CrasInputStream* me = static_cast<CrasInputStream*>(arg);
- me->NotifyStreamError(err);
- return 0;
-}
-
-void CrasInputStream::ReadAudio(size_t frames,
- uint8* buffer,
- const timespec* sample_ts) {
- DCHECK(callback_);
-
- timespec latency_ts = {0, 0};
-
- // Determine latency and pass that on to the sink. sample_ts is the wall time
- // indicating when the first sample in the buffer was captured. Convert that
- // to latency in bytes.
- cras_client_calc_capture_latency(sample_ts, &latency_ts);
- double latency_usec =
- latency_ts.tv_sec * base::Time::kMicrosecondsPerSecond +
- latency_ts.tv_nsec / base::Time::kNanosecondsPerMicrosecond;
- double frames_latency =
- latency_usec * params_.sample_rate() / base::Time::kMicrosecondsPerSecond;
- unsigned int bytes_latency =
- static_cast<unsigned int>(frames_latency * bytes_per_frame_);
-
-  // Update the AGC volume level once every second. Note that |volume| is
- // also updated each time SetVolume() is called through IPC by the
- // render-side AGC.
- double normalized_volume = 0.0;
- QueryAgcVolume(&normalized_volume);
-
- callback_->OnData(this,
- buffer,
- frames * bytes_per_frame_,
- bytes_latency,
- normalized_volume);
-}
-
-void CrasInputStream::NotifyStreamError(int err) {
- if (callback_)
- callback_->OnError(this, err);
-}
-
-double CrasInputStream::GetMaxVolume() {
- DCHECK(client_);
-
- // Capture gain is returned as dB * 100 (150 => 1.5dBFS). Convert the dB
- // value to a ratio before returning.
- double dB = cras_client_get_system_max_capture_gain(client_) / 100.0;
- return GetVolumeRatioFromDecibels(dB);
-}
-
-void CrasInputStream::SetVolume(double volume) {
- DCHECK(client_);
-
- // Convert from the passed volume ratio, to dB * 100.
- double dB = GetDecibelsFromVolumeRatio(volume);
- cras_client_set_system_capture_gain(client_, static_cast<long>(dB * 100.0));
-
-  // Update the AGC volume level based on the last setting above. Note that
-  // the volume-level resolution is not infinite, and it is therefore not
-  // possible to assume that the volume provided as an input parameter can be
- // used directly. Instead, a new query to the audio hardware is required.
- // This method does nothing if AGC is disabled.
- UpdateAgcVolume();
-}
-
-double CrasInputStream::GetVolume() {
- if (!client_)
- return 0.0;
-
-  double dB = cras_client_get_system_capture_gain(client_) / 100.0;
- return GetVolumeRatioFromDecibels(dB);
-}
-
-double CrasInputStream::GetVolumeRatioFromDecibels(double dB) const {
- return pow(10, dB / 20.0);
-}
-
-double CrasInputStream::GetDecibelsFromVolumeRatio(double volume_ratio) const {
- return 20 * log10(volume_ratio);
-}
-
-} // namespace media
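For reference, the gain handling in CrasInputStream above treats CRAS capture gain as dB * 100 and converts between decibels and a linear volume ratio via pow()/log10(). Below is a minimal standalone sketch of that arithmetic, using illustrative free functions and a made-up reported gain rather than the real cras_client API.

#include <cmath>
#include <cstdio>

// Illustrative counterparts of GetVolumeRatioFromDecibels() and
// GetDecibelsFromVolumeRatio() above.
static double VolumeRatioFromDecibels(double dB) {
  return std::pow(10.0, dB / 20.0);
}

static double DecibelsFromVolumeRatio(double volume_ratio) {
  return 20.0 * std::log10(volume_ratio);
}

int main() {
  // Assume the server reported a maximum capture gain of 150, i.e. 1.5 dB.
  const long reported_gain = 150;
  const double max_dB = reported_gain / 100.0;
  const double max_ratio = VolumeRatioFromDecibels(max_dB);  // ~1.19
  std::printf("max ratio = %.3f\n", max_ratio);
  std::printf("round trip = %.2f dB\n", DecibelsFromVolumeRatio(max_ratio));
  return 0;
}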
diff --git a/src/media/audio/linux/cras_input.h b/src/media/audio/linux/cras_input.h
deleted file mode 100644
index 87891da..0000000
--- a/src/media/audio/linux/cras_input.h
+++ /dev/null
@@ -1,104 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef MEDIA_AUDIO_LINUX_CRAS_INPUT_H_
-#define MEDIA_AUDIO_LINUX_CRAS_INPUT_H_
-
-#include <cras_client.h>
-
-#include <string>
-
-#include "base/compiler_specific.h"
-#include "base/memory/scoped_ptr.h"
-#include "base/memory/weak_ptr.h"
-#include "media/audio/audio_input_stream_impl.h"
-#include "media/audio/audio_io.h"
-#include "media/audio/audio_parameters.h"
-
-namespace media {
-
-class AudioManagerLinux;
-
-// Provides an input stream for audio capture based on CRAS, the ChromeOS Audio
-// Server. This object is not thread safe and all methods should be invoked in
-// the thread that created the object.
-class CrasInputStream : public AudioInputStreamImpl {
- public:
- // The ctor takes all the usual parameters, plus |manager| which is the
-  // audio manager that is creating this object.
- CrasInputStream(const AudioParameters& params, AudioManagerLinux* manager);
-
-  // The dtor is typically called by the AudioManager only and is usually
-  // triggered by calling AudioInputStream::Close().
- virtual ~CrasInputStream();
-
- // Implementation of AudioInputStream.
- virtual bool Open() OVERRIDE;
- virtual void Start(AudioInputCallback* callback) OVERRIDE;
- virtual void Stop() OVERRIDE;
- virtual void Close() OVERRIDE;
- virtual double GetMaxVolume() OVERRIDE;
- virtual void SetVolume(double volume) OVERRIDE;
- virtual double GetVolume() OVERRIDE;
-
- private:
- // Handles requests to get samples from the provided buffer. This will be
- // called by the audio server when it has samples ready.
- static int SamplesReady(cras_client* client,
- cras_stream_id_t stream_id,
- uint8* samples,
- size_t frames,
- const timespec* sample_ts,
- void* arg);
-
-  // Handles notification that there was an error with the capture stream.
- static int StreamError(cras_client* client,
- cras_stream_id_t stream_id,
- int err,
- void* arg);
-
- // Reads one or more buffers of audio from the device, passes on to the
- // registered callback. Called from SamplesReady().
- void ReadAudio(size_t frames, uint8* buffer, const timespec* sample_ts);
-
-  // Deals with an error that occurred in the stream. Called from StreamError().
- void NotifyStreamError(int err);
-
- // Convert from dB * 100 to a volume ratio.
- double GetVolumeRatioFromDecibels(double dB) const;
-
- // Convert from a volume ratio to dB.
- double GetDecibelsFromVolumeRatio(double volume_ratio) const;
-
- // Non-refcounted pointer back to the audio manager.
- // The AudioManager indirectly holds on to stream objects, so we don't
- // want circular references. Additionally, stream objects live on the audio
- // thread, which is owned by the audio manager and we don't want to addref
- // the manager from that thread.
- AudioManagerLinux* audio_manager_;
-
- // Size of frame in bytes.
- uint32 bytes_per_frame_;
-
-  // Callback to pass audio samples to; valid while recording.
- AudioInputCallback* callback_;
-
- // The client used to communicate with the audio server.
- cras_client* client_;
-
- // PCM parameters for the stream.
- AudioParameters params_;
-
- // True if the stream has been started.
- bool started_;
-
- // ID of the playing stream.
- cras_stream_id_t stream_id_;
-
- DISALLOW_COPY_AND_ASSIGN(CrasInputStream);
-};
-
-} // namespace media
-
-#endif // MEDIA_AUDIO_LINUX_CRAS_INPUT_H_
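The header above wires SamplesReady() and StreamError() into a C callback API: the library stores a plain function pointer plus a void* context, and the static hook casts that context back to the C++ instance. A self-contained sketch of that trampoline pattern with an invented signature (not the real cras_client one):

#include <cstdio>

class Stream {
 public:
  // Static hook with a C-compatible signature; |arg| carries the instance.
  static int OnSamplesStatic(void* arg, int frames) {
    return static_cast<Stream*>(arg)->OnSamples(frames);
  }

 private:
  int OnSamples(int frames) {
    std::printf("got %d frames\n", frames);
    return frames;  // Report how many frames were consumed.
  }
};

int main() {
  Stream stream;
  // A C library would store (&Stream::OnSamplesStatic, &stream) and invoke
  // the pair later from its own thread; here we just call it directly.
  return Stream::OnSamplesStatic(&stream, 480) == 480 ? 0 : 1;
}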
diff --git a/src/media/audio/linux/cras_input_unittest.cc b/src/media/audio/linux/cras_input_unittest.cc
deleted file mode 100644
index 5d9a1c8..0000000
--- a/src/media/audio/linux/cras_input_unittest.cc
+++ /dev/null
@@ -1,212 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include <unistd.h>
-
-#include <string>
-
-#include "base/synchronization/waitable_event.h"
-#include "base/test/test_timeouts.h"
-#include "base/time.h"
-#include "media/audio/linux/audio_manager_linux.h"
-#include "media/audio/linux/cras_input.h"
-#include "testing/gmock/include/gmock/gmock.h"
-#include "testing/gtest/include/gtest/gtest.h"
-
-using testing::_;
-using testing::AtLeast;
-using testing::Ge;
-using testing::InvokeWithoutArgs;
-using testing::StrictMock;
-
-namespace media {
-
-class MockAudioInputCallback : public AudioInputStream::AudioInputCallback {
- public:
- MOCK_METHOD5(OnData, void(
- AudioInputStream*, const uint8*, uint32, uint32, double));
- MOCK_METHOD2(OnError, void(AudioInputStream*, int));
- MOCK_METHOD1(OnClose, void(AudioInputStream*));
-};
-
-class MockAudioManagerLinuxInput : public AudioManagerLinux {
- public:
- // We need to override this function in order to skip checking the number
-  // of active input streams. This is because the number of active streams
- // is managed inside MakeAudioInputStream, and we don't use
- // MakeAudioInputStream to create the stream in the tests.
- virtual void ReleaseInputStream(AudioInputStream* stream) OVERRIDE {
- DCHECK(stream);
- delete stream;
- }
-};
-
-class CrasInputStreamTest : public testing::Test {
- protected:
- CrasInputStreamTest() {
- mock_manager_.reset(new StrictMock<MockAudioManagerLinuxInput>());
- }
-
- virtual ~CrasInputStreamTest() {
- }
-
- CrasInputStream* CreateStream(ChannelLayout layout) {
- return CreateStream(layout, kTestFramesPerPacket);
- }
-
- CrasInputStream* CreateStream(ChannelLayout layout,
- int32 samples_per_packet) {
- AudioParameters params(kTestFormat,
- layout,
- kTestSampleRate,
- kTestBitsPerSample,
- samples_per_packet);
- return new CrasInputStream(params, mock_manager_.get());
- }
-
- void CaptureSomeFrames(const AudioParameters ¶ms,
- unsigned int duration_ms) {
- CrasInputStream* test_stream = new CrasInputStream(params,
- mock_manager_.get());
-
- ASSERT_TRUE(test_stream->Open());
-
- // Allow 8 frames variance for SRC in the callback. Different numbers of
- // samples can be provided when doing non-integer SRC. For example
- // converting from 192k to 44.1k is a ratio of 4.35 to 1.
- MockAudioInputCallback mock_callback;
- unsigned int expected_size = (kTestFramesPerPacket - 8) *
- params.channels() *
- params.bits_per_sample() / 8;
-
- base::WaitableEvent event(false, false);
-
- EXPECT_CALL(mock_callback,
- OnData(test_stream, _, Ge(expected_size), _, _))
- .WillOnce(InvokeWithoutArgs(&event, &base::WaitableEvent::Signal));
-
- test_stream->Start(&mock_callback);
-
- // Wait for samples to be captured.
- EXPECT_TRUE(event.TimedWait(TestTimeouts::action_timeout()));
-
- test_stream->Stop();
-
- EXPECT_CALL(mock_callback, OnClose(test_stream)).Times(1);
- test_stream->Close();
- }
-
- static const unsigned int kTestBitsPerSample;
- static const unsigned int kTestCaptureDurationMs;
- static const ChannelLayout kTestChannelLayout;
- static const AudioParameters::Format kTestFormat;
- static const uint32 kTestFramesPerPacket;
- static const int kTestSampleRate;
-
- scoped_ptr<StrictMock<MockAudioManagerLinuxInput> > mock_manager_;
-
- private:
- DISALLOW_COPY_AND_ASSIGN(CrasInputStreamTest);
-};
-
-const unsigned int CrasInputStreamTest::kTestBitsPerSample = 16;
-const unsigned int CrasInputStreamTest::kTestCaptureDurationMs = 250;
-const ChannelLayout CrasInputStreamTest::kTestChannelLayout =
- CHANNEL_LAYOUT_STEREO;
-const AudioParameters::Format CrasInputStreamTest::kTestFormat =
- AudioParameters::AUDIO_PCM_LINEAR;
-const uint32 CrasInputStreamTest::kTestFramesPerPacket = 1000;
-const int CrasInputStreamTest::kTestSampleRate = 44100;
-
-TEST_F(CrasInputStreamTest, OpenMono) {
- CrasInputStream* test_stream = CreateStream(CHANNEL_LAYOUT_MONO);
- EXPECT_TRUE(test_stream->Open());
- test_stream->Close();
-}
-
-TEST_F(CrasInputStreamTest, OpenStereo) {
- CrasInputStream* test_stream = CreateStream(CHANNEL_LAYOUT_STEREO);
- EXPECT_TRUE(test_stream->Open());
- test_stream->Close();
-}
-
-TEST_F(CrasInputStreamTest, BadBitsPerSample) {
- AudioParameters bad_bps_params(kTestFormat,
- kTestChannelLayout,
- kTestSampleRate,
- kTestBitsPerSample - 1,
- kTestFramesPerPacket);
- CrasInputStream* test_stream =
- new CrasInputStream(bad_bps_params, mock_manager_.get());
- EXPECT_FALSE(test_stream->Open());
- test_stream->Close();
-}
-
-TEST_F(CrasInputStreamTest, BadFormat) {
- AudioParameters bad_format_params(AudioParameters::AUDIO_LAST_FORMAT,
- kTestChannelLayout,
- kTestSampleRate,
- kTestBitsPerSample,
- kTestFramesPerPacket);
- CrasInputStream* test_stream =
- new CrasInputStream(bad_format_params, mock_manager_.get());
- EXPECT_FALSE(test_stream->Open());
- test_stream->Close();
-}
-
-TEST_F(CrasInputStreamTest, BadSampleRate) {
- AudioParameters bad_rate_params(kTestFormat,
- kTestChannelLayout,
- 0,
- kTestBitsPerSample,
- kTestFramesPerPacket);
- CrasInputStream* test_stream =
- new CrasInputStream(bad_rate_params, mock_manager_.get());
- EXPECT_FALSE(test_stream->Open());
- test_stream->Close();
-}
-
-TEST_F(CrasInputStreamTest, SetGetVolume) {
- CrasInputStream* test_stream = CreateStream(CHANNEL_LAYOUT_MONO);
- EXPECT_TRUE(test_stream->Open());
-
- double max_volume = test_stream->GetMaxVolume();
- EXPECT_GE(max_volume, 1.0);
-
- test_stream->SetVolume(max_volume / 2);
-
- double new_volume = test_stream->GetVolume();
-
- EXPECT_GE(new_volume, 0.0);
- EXPECT_LE(new_volume, max_volume);
-
- test_stream->Close();
-}
-
-TEST_F(CrasInputStreamTest, CaptureFrames) {
- const unsigned int rates[] =
- {8000, 16000, 22050, 32000, 44100, 48000, 96000, 192000};
-
- for (unsigned int i = 0; i < ARRAY_SIZE(rates); i++) {
- SCOPED_TRACE(testing::Message() << "Mono " << rates[i] << "Hz");
- AudioParameters params_mono(kTestFormat,
- CHANNEL_LAYOUT_MONO,
- rates[i],
- kTestBitsPerSample,
- kTestFramesPerPacket);
- CaptureSomeFrames(params_mono, kTestCaptureDurationMs);
- }
-
- for (unsigned int i = 0; i < ARRAY_SIZE(rates); i++) {
- SCOPED_TRACE(testing::Message() << "Stereo " << rates[i] << "Hz");
- AudioParameters params_stereo(kTestFormat,
- CHANNEL_LAYOUT_STEREO,
- rates[i],
- kTestBitsPerSample,
- kTestFramesPerPacket);
- CaptureSomeFrames(params_stereo, kTestCaptureDurationMs);
- }
-}
-
-} // namespace media
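The CaptureSomeFrames() helper above blocks the test thread on a base::WaitableEvent until the mocked OnData() call signals it, bounded by TestTimeouts::action_timeout(). A library-free sketch of that wait-with-timeout idiom, using std::condition_variable purely for illustration:

#include <chrono>
#include <condition_variable>
#include <cstdio>
#include <mutex>
#include <thread>

int main() {
  std::mutex m;
  std::condition_variable cv;
  bool signaled = false;

  // Stand-in for the capture callback delivering its first buffer.
  std::thread capture_thread([&] {
    std::this_thread::sleep_for(std::chrono::milliseconds(10));
    {
      std::lock_guard<std::mutex> lock(m);
      signaled = true;
    }
    cv.notify_one();
  });

  // Stand-in for event.TimedWait(TestTimeouts::action_timeout()).
  std::unique_lock<std::mutex> lock(m);
  const bool got_data =
      cv.wait_for(lock, std::chrono::seconds(1), [&] { return signaled; });
  std::puts(got_data ? "captured" : "timed out");

  capture_thread.join();
  return got_data ? 0 : 1;
}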
diff --git a/src/media/audio/linux/cras_output.cc b/src/media/audio/linux/cras_output.cc
deleted file mode 100644
index 429ffc2..0000000
--- a/src/media/audio/linux/cras_output.cc
+++ /dev/null
@@ -1,348 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-//
-// The object has one error state: |state_| == kInError. When |state_| ==
-// kInError, all public API functions will fail with an error (Start() will call
-// the OnError() function on the callback immediately), or no-op themselves with
-// the exception of Close(). Even if an error state has been entered, if Open()
-// has previously returned successfully, Close() must be called.
-
-#include "media/audio/linux/cras_output.h"
-
-#include <cras_client.h>
-
-#include "base/logging.h"
-#include "media/audio/audio_util.h"
-#include "media/audio/linux/alsa_util.h"
-#include "media/audio/linux/audio_manager_linux.h"
-
-namespace media {
-
-// Helps make log messages readable.
-std::ostream& operator<<(std::ostream& os,
- CrasOutputStream::InternalState state) {
- switch (state) {
- case CrasOutputStream::kInError:
- os << "kInError";
- break;
- case CrasOutputStream::kCreated:
- os << "kCreated";
- break;
- case CrasOutputStream::kIsOpened:
- os << "kIsOpened";
- break;
- case CrasOutputStream::kIsPlaying:
- os << "kIsPlaying";
- break;
- case CrasOutputStream::kIsStopped:
- os << "kIsStopped";
- break;
- case CrasOutputStream::kIsClosed:
- os << "kIsClosed";
- break;
- default:
- os << "UnknownState";
- break;
- };
- return os;
-}
-
-// Overview of operation:
-// 1) An object of CrasOutputStream is created by the AudioManager
-// factory: audio_man->MakeAudioStream().
-// 2) Next some thread will call Open(), at that point a client is created and
-// configured for the correct format and sample rate.
-// 3) Then Start(source) is called and a stream is added to the CRAS client
-// which will create its own thread that periodically calls the source for more
-// data as buffers are being consumed.
-// 4) When finished Stop() is called, which is handled by stopping the stream.
-// 5) Finally Close() is called. It cleans up and notifies the audio manager,
-// which likely will destroy this object.
-
-CrasOutputStream::CrasOutputStream(const AudioParameters& params,
- AudioManagerLinux* manager)
- : client_(NULL),
- stream_id_(0),
- samples_per_packet_(params.frames_per_buffer()),
- bytes_per_frame_(0),
- frame_rate_(params.sample_rate()),
- num_channels_(params.channels()),
- pcm_format_(alsa_util::BitsToFormat(params.bits_per_sample())),
- state_(kCreated),
- volume_(1.0),
- manager_(manager),
- source_callback_(NULL),
- audio_bus_(AudioBus::Create(params)) {
- // We must have a manager.
- DCHECK(manager_);
-
- // Sanity check input values.
- if (params.sample_rate() <= 0) {
- LOG(WARNING) << "Unsupported audio frequency.";
- TransitionTo(kInError);
- return;
- }
-
- if (AudioParameters::AUDIO_PCM_LINEAR != params.format() &&
- AudioParameters::AUDIO_PCM_LOW_LATENCY != params.format()) {
- LOG(WARNING) << "Unsupported audio format.";
- TransitionTo(kInError);
- return;
- }
-
- if (pcm_format_ == SND_PCM_FORMAT_UNKNOWN) {
- LOG(WARNING) << "Unsupported bits per sample: " << params.bits_per_sample();
- TransitionTo(kInError);
- return;
- }
-}
-
-CrasOutputStream::~CrasOutputStream() {
- InternalState current_state = state();
- DCHECK(current_state == kCreated ||
- current_state == kIsClosed ||
- current_state == kInError);
-}
-
-bool CrasOutputStream::Open() {
- if (!CanTransitionTo(kIsOpened)) {
- NOTREACHED() << "Invalid state: " << state();
- return false;
- }
-
- // We do not need to check if the transition was successful because
- // CanTransitionTo() was checked above, and it is assumed that this
- // object's public API is only called on one thread so the state cannot
- // transition out from under us.
- TransitionTo(kIsOpened);
-
- // Create the client and connect to the CRAS server.
- int err = cras_client_create(&client_);
- if (err < 0) {
- LOG(WARNING) << "Couldn't create CRAS client.\n";
- client_ = NULL;
- TransitionTo(kInError);
- return false;
- }
- err = cras_client_connect(client_);
- if (err) {
- LOG(WARNING) << "Couldn't connect CRAS client.\n";
- cras_client_destroy(client_);
- client_ = NULL;
- TransitionTo(kInError);
- return false;
- }
- // Then start running the client.
- err = cras_client_run_thread(client_);
- if (err) {
- LOG(WARNING) << "Couldn't run CRAS client.\n";
- cras_client_destroy(client_);
- client_ = NULL;
- TransitionTo(kInError);
- return false;
- }
-
- return true;
-}
-
-void CrasOutputStream::Close() {
- // Sanity Check that we can transition to closed.
- if (TransitionTo(kIsClosed) != kIsClosed) {
-    NOTREACHED() << "Unable to transition to Closed.";
- return;
- }
-
- if (client_) {
- cras_client_stop(client_);
- cras_client_destroy(client_);
- client_ = NULL;
- }
-
- // Signal to the manager that we're closed and can be removed.
- // Should be last call in the method as it deletes "this".
-  // Should be the last call in the method as it deletes "this".
-}
-
-void CrasOutputStream::Start(AudioSourceCallback* callback) {
- CHECK(callback);
- source_callback_ = callback;
-
- // Only start if we can enter the playing state.
- if (TransitionTo(kIsPlaying) != kIsPlaying)
- return;
-
- // Prepare |audio_format| and |stream_params| for the stream we
- // will create.
- cras_audio_format* audio_format = cras_audio_format_create(
- pcm_format_,
- frame_rate_,
- num_channels_);
- if (audio_format == NULL) {
- LOG(WARNING) << "Error setting up audio parameters.";
- TransitionTo(kInError);
- callback->OnError(this, -ENOMEM);
- return;
- }
- cras_stream_params* stream_params = cras_client_stream_params_create(
- CRAS_STREAM_OUTPUT,
- samples_per_packet_ * 2, // Total latency.
- samples_per_packet_ / 2, // Call back when this many left.
- samples_per_packet_, // Call back with at least this much space.
- CRAS_STREAM_TYPE_DEFAULT,
- 0,
- this,
- CrasOutputStream::PutSamples,
- CrasOutputStream::StreamError,
- audio_format);
- if (stream_params == NULL) {
- LOG(WARNING) << "Error setting up stream parameters.";
- TransitionTo(kInError);
- callback->OnError(this, -ENOMEM);
- cras_audio_format_destroy(audio_format);
- return;
- }
-
- // Before starting the stream, save the number of bytes in a frame for use in
- // the callback.
- bytes_per_frame_ = cras_client_format_bytes_per_frame(audio_format);
-
- // Adding the stream will start the audio callbacks requesting data.
- int err = cras_client_add_stream(client_, &stream_id_, stream_params);
- if (err < 0) {
- LOG(WARNING) << "Failed to add the stream";
- TransitionTo(kInError);
- callback->OnError(this, err);
- cras_audio_format_destroy(audio_format);
- cras_client_stream_params_destroy(stream_params);
- return;
- }
-
- // Set initial volume.
- cras_client_set_stream_volume(client_, stream_id_, volume_);
-
- // Done with config params.
- cras_audio_format_destroy(audio_format);
- cras_client_stream_params_destroy(stream_params);
-}
-
-void CrasOutputStream::Stop() {
- if (!client_)
- return;
- // Removing the stream from the client stops audio.
- cras_client_rm_stream(client_, stream_id_);
- TransitionTo(kIsStopped);
-}
-
-void CrasOutputStream::SetVolume(double volume) {
- if (!client_)
- return;
- volume_ = static_cast<float>(volume);
- cras_client_set_stream_volume(client_, stream_id_, volume_);
-}
-
-void CrasOutputStream::GetVolume(double* volume) {
- *volume = volume_;
-}
-
-// Static callback asking for samples.
-int CrasOutputStream::PutSamples(cras_client* client,
- cras_stream_id_t stream_id,
- uint8* samples,
- size_t frames,
- const timespec* sample_ts,
- void* arg) {
- CrasOutputStream* me = static_cast<CrasOutputStream*>(arg);
- return me->Render(frames, samples, sample_ts);
-}
-
-// Static callback for stream errors.
-int CrasOutputStream::StreamError(cras_client* client,
- cras_stream_id_t stream_id,
- int err,
- void* arg) {
- CrasOutputStream* me = static_cast<CrasOutputStream*>(arg);
- me->NotifyStreamError(err);
- return 0;
-}
-
-// Note this is run from a real time thread, so don't waste cycles here.
-uint32 CrasOutputStream::Render(size_t frames,
- uint8* buffer,
- const timespec* sample_ts) {
- timespec latency_ts = {0, 0};
-
- // Determine latency and pass that on to the source.
- cras_client_calc_playback_latency(sample_ts, &latency_ts);
-
- // Treat negative latency (if we are too slow to render) as 0.
- uint32 latency_usec;
- if (latency_ts.tv_sec < 0 || latency_ts.tv_nsec < 0) {
- latency_usec = 0;
- } else {
- latency_usec = (latency_ts.tv_sec * 1000000) +
- latency_ts.tv_nsec / 1000;
- }
-
- uint32 frames_latency = latency_usec * frame_rate_ / 1000000;
- uint32 bytes_latency = frames_latency * bytes_per_frame_;
- DCHECK_EQ(frames, static_cast<size_t>(audio_bus_->frames()));
- int frames_filled = source_callback_->OnMoreData(
- audio_bus_.get(), AudioBuffersState(0, bytes_latency));
- // Note: If this ever changes to output raw float the data must be clipped and
- // sanitized since it may come from an untrusted source such as NaCl.
- audio_bus_->ToInterleaved(
- frames_filled, bytes_per_frame_ / num_channels_, buffer);
- return frames_filled;
-}
-
-void CrasOutputStream::NotifyStreamError(int err) {
- // This will remove the stream from the client.
- if (state_ == kIsClosed || state_ == kInError)
- return; // Don't care about error if we aren't using it.
- TransitionTo(kInError);
- if (source_callback_)
- source_callback_->OnError(this, err);
-}
-
-bool CrasOutputStream::CanTransitionTo(InternalState to) {
- switch (state_) {
- case kCreated:
- return to == kIsOpened || to == kIsClosed || to == kInError;
-
- case kIsOpened:
- return to == kIsPlaying || to == kIsStopped ||
- to == kIsClosed || to == kInError;
-
- case kIsPlaying:
- return to == kIsPlaying || to == kIsStopped ||
- to == kIsClosed || to == kInError;
-
- case kIsStopped:
- return to == kIsPlaying || to == kIsStopped ||
- to == kIsClosed || to == kInError;
-
- case kInError:
- return to == kIsClosed || to == kInError;
-
- case kIsClosed:
- return false;
- }
- return false;
-}
-
-CrasOutputStream::InternalState
-CrasOutputStream::TransitionTo(InternalState to) {
- if (!CanTransitionTo(to)) {
- state_ = kInError;
- } else {
- state_ = to;
- }
- return state_;
-}
-
-CrasOutputStream::InternalState CrasOutputStream::state() {
- return state_;
-}
-
-} // namespace media
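Render() above converts the playback latency reported by CRAS into a byte count before asking the source for more data. A worked example of that conversion with made-up values (10 ms of latency at 44.1 kHz, 16-bit stereo), independent of the CRAS types:

#include <cstdint>
#include <cstdio>

int main() {
  // Assume a reported playback latency of 0 s and 10,000,000 ns (10 ms).
  const int64_t latency_sec = 0;
  const int64_t latency_nsec = 10000000;
  const uint32_t frame_rate = 44100;   // Hz
  const uint32_t bytes_per_frame = 4;  // 16-bit stereo

  // Negative latency (we were too slow to render) is clamped to zero.
  uint32_t latency_usec = 0;
  if (latency_sec >= 0 && latency_nsec >= 0) {
    latency_usec = static_cast<uint32_t>(latency_sec * 1000000 +
                                         latency_nsec / 1000);
  }

  const uint32_t frames_latency = latency_usec * frame_rate / 1000000;  // 441
  const uint32_t bytes_latency = frames_latency * bytes_per_frame;      // 1764
  std::printf("%u us -> %u frames -> %u bytes\n",
              latency_usec, frames_latency, bytes_latency);
  return 0;
}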
diff --git a/src/media/audio/linux/cras_output.h b/src/media/audio/linux/cras_output.h
deleted file mode 100644
index 8dffbce..0000000
--- a/src/media/audio/linux/cras_output.h
+++ /dev/null
@@ -1,127 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-//
-// Creates an output stream based on the cras (ChromeOS audio server) interface.
-//
-// CrasOutputStream object is *not* thread-safe and should only be used
-// from the audio thread.
-
-#ifndef MEDIA_AUDIO_LINUX_CRAS_OUTPUT_H_
-#define MEDIA_AUDIO_LINUX_CRAS_OUTPUT_H_
-
-#include <alsa/asoundlib.h>
-#include <cras_client.h>
-#include <ostream>
-
-#include "base/compiler_specific.h"
-#include "base/gtest_prod_util.h"
-#include "media/audio/audio_io.h"
-
-namespace media {
-
-class AudioManagerLinux;
-class AudioParameters;
-
-// Implementation of AudioOutputStream for Chrome OS using the Chrome OS audio
-// server.
-class MEDIA_EXPORT CrasOutputStream : public AudioOutputStream {
- public:
- // The ctor takes all the usual parameters, plus |manager| which is the
-  // audio manager that is creating this object.
- CrasOutputStream(const AudioParameters& params, AudioManagerLinux* manager);
-
- // The dtor is typically called by the AudioManager only and it is usually
- // triggered by calling AudioOutputStream::Close().
- virtual ~CrasOutputStream();
-
- // Implementation of AudioOutputStream.
- virtual bool Open() OVERRIDE;
- virtual void Close() OVERRIDE;
- virtual void Start(AudioSourceCallback* callback) OVERRIDE;
- virtual void Stop() OVERRIDE;
- virtual void SetVolume(double volume) OVERRIDE;
- virtual void GetVolume(double* volume) OVERRIDE;
-
- // Flags indicating the state of the stream.
- enum InternalState {
- kInError = 0,
- kCreated,
- kIsOpened,
- kIsPlaying,
- kIsStopped,
- kIsClosed
- };
- friend std::ostream& operator<<(std::ostream& os, InternalState);
- // Reports the current state for unit testing.
- InternalState state();
-
- private:
- // Handles requests to put samples in the provided buffer. This will be
- // called by the audio server when it needs more data.
- static int PutSamples(cras_client* client,
- cras_stream_id_t stream_id,
- uint8* samples,
- size_t frames,
- const timespec* sample_ts,
- void* arg);
-
-  // Handles notification that there was an error with the playback stream.
- static int StreamError(cras_client* client,
- cras_stream_id_t stream_id,
- int err,
- void* arg);
-
- // Actually fills buffer with audio data. Called from PutSamples().
- uint32 Render(size_t frames, uint8* buffer, const timespec* sample_ts);
-
-  // Deals with an error that occurred in the stream. Called from StreamError().
- void NotifyStreamError(int err);
-
- // Functions to safeguard state transitions. All changes to the object state
- // should go through these functions.
- bool CanTransitionTo(InternalState to);
- InternalState TransitionTo(InternalState to);
-
- // The client used to communicate with the audio server.
- cras_client* client_;
-
- // ID of the playing stream.
- cras_stream_id_t stream_id_;
-
- // Packet size in samples.
- uint32 samples_per_packet_;
-
- // Size of frame in bytes.
- uint32 bytes_per_frame_;
-
- // Rate in Hz.
- size_t frame_rate_;
-
- // Number of channels.
- size_t num_channels_;
-
- // PCM format for Alsa.
- const snd_pcm_format_t pcm_format_;
-
- // Current state.
- InternalState state_;
-
- // Volume level from 0.0 to 1.0.
- float volume_;
-
- // Audio manager that created us. Used to report that we've been closed.
- AudioManagerLinux* manager_;
-
- // Callback to get audio samples.
- AudioSourceCallback* source_callback_;
-
- // Container for retrieving data from AudioSourceCallback::OnMoreData().
- scoped_ptr<AudioBus> audio_bus_;
-
- DISALLOW_COPY_AND_ASSIGN(CrasOutputStream);
-};
-
-} // namespace media
-
-#endif // MEDIA_AUDIO_LINUX_CRAS_OUTPUT_H_
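The InternalState machine declared above funnels every state change through CanTransitionTo()/TransitionTo(), collapsing illegal transitions into the error state. A compact sketch of that guard as free functions, with illustrative enumerator names:

#include <cassert>

enum class State { kError, kCreated, kOpened, kPlaying, kStopped, kClosed };

static bool CanTransitionTo(State from, State to) {
  switch (from) {
    case State::kCreated:
      return to == State::kOpened || to == State::kClosed ||
             to == State::kError;
    case State::kOpened:
    case State::kPlaying:
    case State::kStopped:
      return to == State::kPlaying || to == State::kStopped ||
             to == State::kClosed || to == State::kError;
    case State::kError:
      return to == State::kClosed || to == State::kError;
    case State::kClosed:
      return false;
  }
  return false;
}

static State TransitionTo(State from, State to) {
  // Any disallowed transition drops the object into the error state.
  return CanTransitionTo(from, to) ? to : State::kError;
}

int main() {
  State s = State::kCreated;
  s = TransitionTo(s, State::kOpened);   // Open(): allowed.
  s = TransitionTo(s, State::kPlaying);  // Start(): allowed.
  assert(s == State::kPlaying);
  s = TransitionTo(s, State::kCreated);  // Not allowed: becomes kError.
  assert(s == State::kError);
  return 0;
}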
diff --git a/src/media/audio/linux/cras_output_unittest.cc b/src/media/audio/linux/cras_output_unittest.cc
deleted file mode 100644
index f9e2b24..0000000
--- a/src/media/audio/linux/cras_output_unittest.cc
+++ /dev/null
@@ -1,220 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include <string>
-
-#include "media/audio/linux/audio_manager_linux.h"
-#include "media/audio/linux/cras_output.h"
-#include "testing/gmock/include/gmock/gmock.h"
-#include "testing/gtest/include/gtest/gtest.h"
-
-using testing::_;
-using testing::DoAll;
-using testing::Return;
-using testing::SetArgumentPointee;
-using testing::StrictMock;
-
-namespace media {
-
-class MockAudioSourceCallback : public AudioOutputStream::AudioSourceCallback {
- public:
- MOCK_METHOD2(OnMoreData, int(AudioBus* audio_bus,
- AudioBuffersState buffers_state));
- MOCK_METHOD3(OnMoreIOData, int(AudioBus* source,
- AudioBus* dest,
- AudioBuffersState buffers_state));
- MOCK_METHOD2(OnError, void(AudioOutputStream* stream, int code));
-};
-
-class MockAudioManagerLinux : public AudioManagerLinux {
- public:
- MOCK_METHOD0(Init, void());
- MOCK_METHOD0(HasAudioOutputDevices, bool());
- MOCK_METHOD0(HasAudioInputDevices, bool());
- MOCK_METHOD1(MakeLinearOutputStream, AudioOutputStream*(
- const AudioParameters& params));
- MOCK_METHOD1(MakeLowLatencyOutputStream, AudioOutputStream*(
- const AudioParameters& params));
-  MOCK_METHOD2(MakeLinearInputStream, AudioInputStream*(
- const AudioParameters& params, const std::string& device_id));
- MOCK_METHOD2(MakeLowLatencyInputStream, AudioInputStream*(
- const AudioParameters& params, const std::string& device_id));
-
-  // We need to override this function in order to skip checking the number
-  // of active output streams. This is because the number of active streams
- // is managed inside MakeAudioOutputStream, and we don't use
- // MakeAudioOutputStream to create the stream in the tests.
- virtual void ReleaseOutputStream(AudioOutputStream* stream) OVERRIDE {
- DCHECK(stream);
- delete stream;
- }
-
- // We don't mock this method since all tests will do the same thing
- // and use the current message loop.
- virtual scoped_refptr<base::MessageLoopProxy> GetMessageLoop() OVERRIDE {
- return MessageLoop::current()->message_loop_proxy();
- }
-};
-
-class CrasOutputStreamTest : public testing::Test {
- protected:
- CrasOutputStreamTest() {
- mock_manager_.reset(new StrictMock<MockAudioManagerLinux>());
- }
-
- virtual ~CrasOutputStreamTest() {
- }
-
- CrasOutputStream* CreateStream(ChannelLayout layout) {
- return CreateStream(layout, kTestFramesPerPacket);
- }
-
- CrasOutputStream* CreateStream(ChannelLayout layout,
- int32 samples_per_packet) {
- AudioParameters params(kTestFormat, layout, kTestSampleRate,
- kTestBitsPerSample, samples_per_packet);
- return new CrasOutputStream(params,
- mock_manager_.get());
- }
-
- MockAudioManagerLinux& mock_manager() {
- return *(mock_manager_.get());
- }
-
- static const ChannelLayout kTestChannelLayout;
- static const int kTestSampleRate;
- static const int kTestBitsPerSample;
- static const int kTestBytesPerFrame;
- static const AudioParameters::Format kTestFormat;
- static const uint32 kTestFramesPerPacket;
- static const uint32 kTestPacketSize;
- static struct cras_audio_format* const kFakeAudioFormat;
- static struct cras_stream_params* const kFakeStreamParams;
- static struct cras_client* const kFakeClient;
-
- scoped_ptr<StrictMock<MockAudioManagerLinux> > mock_manager_;
-
- private:
- DISALLOW_COPY_AND_ASSIGN(CrasOutputStreamTest);
-};
-
-const ChannelLayout CrasOutputStreamTest::kTestChannelLayout =
- CHANNEL_LAYOUT_STEREO;
-const int CrasOutputStreamTest::kTestSampleRate =
- AudioParameters::kAudioCDSampleRate;
-const int CrasOutputStreamTest::kTestBitsPerSample = 16;
-const int CrasOutputStreamTest::kTestBytesPerFrame =
- CrasOutputStreamTest::kTestBitsPerSample / 8 *
- ChannelLayoutToChannelCount(CrasOutputStreamTest::kTestChannelLayout);
-const AudioParameters::Format CrasOutputStreamTest::kTestFormat =
- AudioParameters::AUDIO_PCM_LINEAR;
-const uint32 CrasOutputStreamTest::kTestFramesPerPacket = 1000;
-const uint32 CrasOutputStreamTest::kTestPacketSize =
- CrasOutputStreamTest::kTestFramesPerPacket *
- CrasOutputStreamTest::kTestBytesPerFrame;
-struct cras_audio_format* const CrasOutputStreamTest::kFakeAudioFormat =
- reinterpret_cast<struct cras_audio_format*>(1);
-struct cras_stream_params* const CrasOutputStreamTest::kFakeStreamParams =
- reinterpret_cast<struct cras_stream_params*>(1);
-struct cras_client* const CrasOutputStreamTest::kFakeClient =
- reinterpret_cast<struct cras_client*>(1);
-
-TEST_F(CrasOutputStreamTest, ConstructedState) {
- // Should support mono.
- CrasOutputStream* test_stream = CreateStream(CHANNEL_LAYOUT_MONO);
- EXPECT_EQ(CrasOutputStream::kCreated, test_stream->state());
- test_stream->Close();
-
-  // Should support surround.
- test_stream = CreateStream(CHANNEL_LAYOUT_SURROUND);
- EXPECT_EQ(CrasOutputStream::kCreated, test_stream->state());
- test_stream->Close();
-
- // Bad bits per sample.
- AudioParameters bad_bps_params(kTestFormat, kTestChannelLayout,
- kTestSampleRate, kTestBitsPerSample - 1,
- kTestFramesPerPacket);
- test_stream = new CrasOutputStream(bad_bps_params, mock_manager_.get());
- EXPECT_EQ(CrasOutputStream::kInError, test_stream->state());
- test_stream->Close();
-
- // Bad format.
- AudioParameters bad_format_params(AudioParameters::AUDIO_LAST_FORMAT,
- kTestChannelLayout, kTestSampleRate,
- kTestBitsPerSample, kTestFramesPerPacket);
- test_stream = new CrasOutputStream(bad_format_params, mock_manager_.get());
- EXPECT_EQ(CrasOutputStream::kInError, test_stream->state());
- test_stream->Close();
-
- // Bad sample rate.
- AudioParameters bad_rate_params(kTestFormat, kTestChannelLayout,
- 0, kTestBitsPerSample, kTestFramesPerPacket);
- test_stream = new CrasOutputStream(bad_rate_params, mock_manager_.get());
- EXPECT_EQ(CrasOutputStream::kInError, test_stream->state());
- test_stream->Close();
-}
-
-TEST_F(CrasOutputStreamTest, OpenClose) {
- CrasOutputStream* test_stream = CreateStream(CHANNEL_LAYOUT_MONO);
- // Open the stream.
- ASSERT_TRUE(test_stream->Open());
- EXPECT_EQ(CrasOutputStream::kIsOpened, test_stream->state());
-
- // Close the stream.
- test_stream->Close();
-}
-
-TEST_F(CrasOutputStreamTest, StartFailBeforeOpen) {
- CrasOutputStream* test_stream = CreateStream(CHANNEL_LAYOUT_MONO);
- MockAudioSourceCallback mock_callback;
-
- test_stream->Start(&mock_callback);
- EXPECT_EQ(CrasOutputStream::kInError, test_stream->state());
-}
-
-TEST_F(CrasOutputStreamTest, StartStop) {
- CrasOutputStream* test_stream = CreateStream(CHANNEL_LAYOUT_MONO);
- MockAudioSourceCallback mock_callback;
-
- // Open the stream.
- ASSERT_TRUE(test_stream->Open());
- EXPECT_EQ(CrasOutputStream::kIsOpened, test_stream->state());
-
- // Start.
- test_stream->Start(&mock_callback);
- EXPECT_EQ(CrasOutputStream::kIsPlaying, test_stream->state());
-
- // Stop.
- test_stream->Stop();
- EXPECT_EQ(CrasOutputStream::kIsStopped, test_stream->state());
-
- // Close the stream.
- test_stream->Close();
-}
-
-TEST_F(CrasOutputStreamTest, RenderFrames) {
- CrasOutputStream* test_stream = CreateStream(CHANNEL_LAYOUT_MONO);
- MockAudioSourceCallback mock_callback;
-
- // Open the stream.
- ASSERT_TRUE(test_stream->Open());
- EXPECT_EQ(CrasOutputStream::kIsOpened, test_stream->state());
-
- // Render Callback.
- EXPECT_CALL(mock_callback, OnMoreData(_, _))
- .WillRepeatedly(Return(kTestFramesPerPacket));
-
- // Start.
- test_stream->Start(&mock_callback);
- EXPECT_EQ(CrasOutputStream::kIsPlaying, test_stream->state());
-
- // Stop.
- test_stream->Stop();
- EXPECT_EQ(CrasOutputStream::kIsStopped, test_stream->state());
-
- // Close the stream.
- test_stream->Close();
-}
-
-} // namespace media
diff --git a/src/media/audio/mac/audio_input_mac.cc b/src/media/audio/mac/audio_input_mac.cc
deleted file mode 100644
index e741b29..0000000
--- a/src/media/audio/mac/audio_input_mac.cc
+++ /dev/null
@@ -1,236 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "media/audio/mac/audio_input_mac.h"
-
-#include "base/basictypes.h"
-#include "base/logging.h"
-#include "base/mac/mac_logging.h"
-#include "media/audio/audio_manager_base.h"
-#include "media/audio/audio_util.h"
-
-#if !defined(OS_IOS)
-#include <CoreServices/CoreServices.h>
-#endif
-
-namespace media {
-
-PCMQueueInAudioInputStream::PCMQueueInAudioInputStream(
- AudioManagerBase* manager, const AudioParameters& params)
- : manager_(manager),
- callback_(NULL),
- audio_queue_(NULL),
- buffer_size_bytes_(0),
- started_(false) {
- // We must have a manager.
- DCHECK(manager_);
- // A frame is one sample across all channels. In interleaved audio the per
- // frame fields identify the set of n |channels|. In uncompressed audio, a
- // packet is always one frame.
- format_.mSampleRate = params.sample_rate();
- format_.mFormatID = kAudioFormatLinearPCM;
- format_.mFormatFlags = kLinearPCMFormatFlagIsPacked |
- kLinearPCMFormatFlagIsSignedInteger;
- format_.mBitsPerChannel = params.bits_per_sample();
- format_.mChannelsPerFrame = params.channels();
- format_.mFramesPerPacket = 1;
- format_.mBytesPerPacket = (params.bits_per_sample() * params.channels()) / 8;
- format_.mBytesPerFrame = format_.mBytesPerPacket;
- format_.mReserved = 0;
-
- buffer_size_bytes_ = params.GetBytesPerBuffer();
-}
-
-PCMQueueInAudioInputStream::~PCMQueueInAudioInputStream() {
- DCHECK(!callback_);
- DCHECK(!audio_queue_);
-}
-
-bool PCMQueueInAudioInputStream::Open() {
- OSStatus err = AudioQueueNewInput(&format_,
- &HandleInputBufferStatic,
- this,
- NULL, // Use OS CFRunLoop for |callback|
- kCFRunLoopCommonModes,
- 0, // Reserved
- &audio_queue_);
- if (err != noErr) {
- HandleError(err);
- return false;
- }
- return SetupBuffers();
-}
-
-void PCMQueueInAudioInputStream::Start(AudioInputCallback* callback) {
- DCHECK(callback);
- DLOG_IF(ERROR, !audio_queue_) << "Open() has not been called successfully";
- if (callback_ || !audio_queue_)
- return;
- callback_ = callback;
- OSStatus err = AudioQueueStart(audio_queue_, NULL);
- if (err != noErr) {
- HandleError(err);
- } else {
- started_ = true;
- manager_->IncreaseActiveInputStreamCount();
- }
-}
-
-void PCMQueueInAudioInputStream::Stop() {
- if (!audio_queue_ || !started_)
- return;
-
-  // Stop is always called before Close. In case of error, this will also be
-  // called when closing the input controller.
- manager_->DecreaseActiveInputStreamCount();
-
- // We request a synchronous stop, so the next call can take some time. In
-  // the Windows implementation we block here as well.
- OSStatus err = AudioQueueStop(audio_queue_, true);
- if (err != noErr)
- HandleError(err);
-
- started_ = false;
-}
-
-void PCMQueueInAudioInputStream::Close() {
- // It is valid to call Close() before calling Open() or Start(), thus
- // |audio_queue_| and |callback_| might be NULL.
- if (audio_queue_) {
- OSStatus err = AudioQueueDispose(audio_queue_, true);
- audio_queue_ = NULL;
- if (err != noErr)
- HandleError(err);
- }
- if (callback_) {
- callback_->OnClose(this);
- callback_ = NULL;
- }
- manager_->ReleaseInputStream(this);
- // CARE: This object may now be destroyed.
-}
-
-double PCMQueueInAudioInputStream::GetMaxVolume() {
- NOTREACHED() << "Only supported for low-latency mode.";
- return 0.0;
-}
-
-void PCMQueueInAudioInputStream::SetVolume(double volume) {
- NOTREACHED() << "Only supported for low-latency mode.";
-}
-
-double PCMQueueInAudioInputStream::GetVolume() {
- NOTREACHED() << "Only supported for low-latency mode.";
- return 0.0;
-}
-
-void PCMQueueInAudioInputStream::SetAutomaticGainControl(bool enabled) {
- NOTREACHED() << "Only supported for low-latency mode.";
-}
-
-bool PCMQueueInAudioInputStream::GetAutomaticGainControl() {
- NOTREACHED() << "Only supported for low-latency mode.";
- return false;
-}
-
-void PCMQueueInAudioInputStream::HandleError(OSStatus err) {
- if (callback_)
- callback_->OnError(this, static_cast<int>(err));
- // This point should never be reached.
- OSSTATUS_DCHECK(0, err);
-}
-
-bool PCMQueueInAudioInputStream::SetupBuffers() {
- DCHECK(buffer_size_bytes_);
- for (int i = 0; i < kNumberBuffers; ++i) {
- AudioQueueBufferRef buffer;
- OSStatus err = AudioQueueAllocateBuffer(audio_queue_,
- buffer_size_bytes_,
- &buffer);
- if (err == noErr)
- err = QueueNextBuffer(buffer);
- if (err != noErr) {
- HandleError(err);
- return false;
- }
- // |buffer| will automatically be freed when |audio_queue_| is released.
- }
- return true;
-}
-
-OSStatus PCMQueueInAudioInputStream::QueueNextBuffer(
- AudioQueueBufferRef audio_buffer) {
- // Only the first 2 params are needed for recording.
- return AudioQueueEnqueueBuffer(audio_queue_, audio_buffer, 0, NULL);
-}
-
-// static
-void PCMQueueInAudioInputStream::HandleInputBufferStatic(
- void* data,
- AudioQueueRef audio_queue,
- AudioQueueBufferRef audio_buffer,
- const AudioTimeStamp* start_time,
- UInt32 num_packets,
- const AudioStreamPacketDescription* desc) {
- reinterpret_cast<PCMQueueInAudioInputStream*>(data)->
- HandleInputBuffer(audio_queue, audio_buffer, start_time,
- num_packets, desc);
-}
-
-void PCMQueueInAudioInputStream::HandleInputBuffer(
- AudioQueueRef audio_queue,
- AudioQueueBufferRef audio_buffer,
- const AudioTimeStamp* start_time,
- UInt32 num_packets,
- const AudioStreamPacketDescription* packet_desc) {
- DCHECK_EQ(audio_queue_, audio_queue);
- DCHECK(audio_buffer->mAudioData);
- if (!callback_) {
- // This can happen if Stop() was called without start.
- DCHECK_EQ(0U, audio_buffer->mAudioDataByteSize);
- return;
- }
-
- if (audio_buffer->mAudioDataByteSize) {
- // The AudioQueue API may use a large internal buffer and repeatedly call us
- // back to back once that internal buffer is filled. When this happens the
- // renderer client does not have enough time to read data back from the
- // shared memory before the next write comes along. If HandleInputBuffer()
- // is called too frequently, Sleep() at least 5ms to ensure the shared
- // memory doesn't get trampled.
- // TODO(dalecurtis): This is a HACK. Long term the AudioQueue path is going
- // away in favor of the AudioUnit based AUAudioInputStream(). Tracked by
- // http://crbug.com/161383.
- base::TimeDelta elapsed = base::Time::Now() - last_fill_;
- const base::TimeDelta kMinDelay = base::TimeDelta::FromMilliseconds(5);
- if (elapsed < kMinDelay)
- base::PlatformThread::Sleep(kMinDelay - elapsed);
-
- callback_->OnData(this,
- reinterpret_cast<const uint8*>(audio_buffer->mAudioData),
- audio_buffer->mAudioDataByteSize,
- audio_buffer->mAudioDataByteSize,
- 0.0);
-
- last_fill_ = base::Time::Now();
- }
- // Recycle the buffer.
- OSStatus err = QueueNextBuffer(audio_buffer);
- if (err != noErr) {
- if (err == kAudioQueueErr_EnqueueDuringReset) {
- // This is the error you get if you try to enqueue a buffer and the
- // queue has been closed. Not really a problem if indeed the queue
- // has been closed.
- // TODO(joth): PCMQueueOutAudioOutputStream uses callback_ to provide an
- // extra guard for this situation, but it seems to introduce more
- // complications than it solves (memory barrier issues accessing it from
-      // multiple threads, loses the means to indicate OnClosed to the client).
- // Should determine if we need to do something equivalent here.
- return;
- }
- HandleError(err);
- }
-}
-
-} // namespace media
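HandleInputBuffer() above throttles back-to-back AudioQueue callbacks by sleeping for whatever remains of a 5 ms minimum delay since the previous delivery. A standalone sketch of that pacing logic using std::chrono in place of base::Time (the loop and buffer count are invented for illustration):

#include <chrono>
#include <cstdio>
#include <thread>

int main() {
  using Clock = std::chrono::steady_clock;
  const auto kMinDelay = std::chrono::milliseconds(5);
  Clock::time_point last_fill = Clock::now();

  for (int i = 0; i < 3; ++i) {
    const auto elapsed = Clock::now() - last_fill;
    // If the previous delivery was less than 5 ms ago, wait out the rest.
    if (elapsed < kMinDelay)
      std::this_thread::sleep_for(kMinDelay - elapsed);
    std::printf("delivering buffer %d\n", i);  // Stand-in for OnData().
    last_fill = Clock::now();
  }
  return 0;
}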
diff --git a/src/media/audio/mac/audio_input_mac.h b/src/media/audio/mac/audio_input_mac.h
deleted file mode 100644
index 1f9856f..0000000
--- a/src/media/audio/mac/audio_input_mac.h
+++ /dev/null
@@ -1,88 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef MEDIA_AUDIO_MAC_AUDIO_INPUT_MAC_H_
-#define MEDIA_AUDIO_MAC_AUDIO_INPUT_MAC_H_
-
-#include <AudioToolbox/AudioQueue.h>
-#include <AudioToolbox/AudioFormat.h>
-
-#include "base/compiler_specific.h"
-#include "base/time.h"
-#include "media/audio/audio_io.h"
-#include "media/audio/audio_parameters.h"
-
-namespace media {
-
-class AudioManagerBase;
-
-// Implementation of AudioInputStream for Mac OS X using the audio queue service
-// present in OS 10.5 and later. Design reflects PCMQueueOutAudioOutputStream.
-class PCMQueueInAudioInputStream : public AudioInputStream {
- public:
- // Parameters as per AudioManager::MakeAudioInputStream.
- PCMQueueInAudioInputStream(AudioManagerBase* manager,
- const AudioParameters& params);
- virtual ~PCMQueueInAudioInputStream();
-
- // Implementation of AudioInputStream.
- virtual bool Open() OVERRIDE;
- virtual void Start(AudioInputCallback* callback) OVERRIDE;
- virtual void Stop() OVERRIDE;
- virtual void Close() OVERRIDE;
- virtual double GetMaxVolume() OVERRIDE;
- virtual void SetVolume(double volume) OVERRIDE;
- virtual double GetVolume() OVERRIDE;
- virtual void SetAutomaticGainControl(bool enabled) OVERRIDE;
- virtual bool GetAutomaticGainControl() OVERRIDE;
-
- private:
-  // Issues OnError on |callback_|.
- void HandleError(OSStatus err);
-
- // Allocates and prepares the memory that will be used for recording.
- bool SetupBuffers();
-
- // Sends a buffer to the audio driver for recording.
- OSStatus QueueNextBuffer(AudioQueueBufferRef audio_buffer);
-
- // Callback from OS, delegates to non-static version below.
- static void HandleInputBufferStatic(
- void* data,
- AudioQueueRef audio_queue,
- AudioQueueBufferRef audio_buffer,
- const AudioTimeStamp* start_time,
- UInt32 num_packets,
- const AudioStreamPacketDescription* desc);
-
- // Handles callback from OS. Will be called on OS internal thread.
- void HandleInputBuffer(AudioQueueRef audio_queue,
- AudioQueueBufferRef audio_buffer,
- const AudioTimeStamp* start_time,
- UInt32 num_packets,
- const AudioStreamPacketDescription* packet_desc);
-
- static const int kNumberBuffers = 3;
-
- // Manager that owns this stream, used for closing down.
- AudioManagerBase* manager_;
- // We use the callback mostly to periodically supply the recorded audio data.
- AudioInputCallback* callback_;
- // Structure that holds the stream format details such as bitrate.
- AudioStreamBasicDescription format_;
- // Handle to the OS audio queue object.
- AudioQueueRef audio_queue_;
-  // Size, in bytes, of each buffer allocated for |audio_queue_|.
- uint32 buffer_size_bytes_;
- // True iff Start() has been called successfully.
- bool started_;
- // Used to determine if we need to slow down |callback_| calls.
- base::Time last_fill_;
-
- DISALLOW_COPY_AND_ASSIGN(PCMQueueInAudioInputStream);
-};
-
-} // namespace media
-
-#endif // MEDIA_AUDIO_MAC_AUDIO_INPUT_MAC_H_
diff --git a/src/media/audio/mac/audio_low_latency_input_mac.cc b/src/media/audio/mac/audio_low_latency_input_mac.cc
deleted file mode 100644
index 85eef1f..0000000
--- a/src/media/audio/mac/audio_low_latency_input_mac.cc
+++ /dev/null
@@ -1,656 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "media/audio/mac/audio_low_latency_input_mac.h"
-
-#include <CoreServices/CoreServices.h>
-
-#include "base/basictypes.h"
-#include "base/logging.h"
-#include "base/mac/mac_logging.h"
-#include "media/audio/audio_util.h"
-#include "media/audio/mac/audio_manager_mac.h"
-#include "media/base/data_buffer.h"
-
-namespace media {
-
-static const int kMinIntervalBetweenVolumeUpdatesMs = 1000;
-
-static std::ostream& operator<<(std::ostream& os,
- const AudioStreamBasicDescription& format) {
- os << "sample rate : " << format.mSampleRate << std::endl
- << "format ID : " << format.mFormatID << std::endl
- << "format flags : " << format.mFormatFlags << std::endl
- << "bytes per packet : " << format.mBytesPerPacket << std::endl
- << "frames per packet : " << format.mFramesPerPacket << std::endl
- << "bytes per frame : " << format.mBytesPerFrame << std::endl
- << "channels per frame: " << format.mChannelsPerFrame << std::endl
- << "bits per channel : " << format.mBitsPerChannel;
- return os;
-}
-
-// See "Technical Note TN2091 - Device input using the HAL Output Audio Unit"
-// http://developer.apple.com/library/mac/#technotes/tn2091/_index.html
-// for more details and background regarding this implementation.
-
-AUAudioInputStream::AUAudioInputStream(
- AudioManagerMac* manager, const AudioParameters& params,
- AudioDeviceID audio_device_id)
- : manager_(manager),
- sink_(NULL),
- audio_unit_(0),
- input_device_id_(audio_device_id),
- started_(false),
- hardware_latency_frames_(0),
- number_of_channels_in_frame_(0) {
- DCHECK(manager_);
-
- // Set up the desired (output) format specified by the client.
- format_.mSampleRate = params.sample_rate();
- format_.mFormatID = kAudioFormatLinearPCM;
- format_.mFormatFlags = kLinearPCMFormatFlagIsPacked |
- kLinearPCMFormatFlagIsSignedInteger;
- format_.mBitsPerChannel = params.bits_per_sample();
- format_.mChannelsPerFrame = params.channels();
- format_.mFramesPerPacket = 1; // uncompressed audio
- format_.mBytesPerPacket = (format_.mBitsPerChannel *
- params.channels()) / 8;
- format_.mBytesPerFrame = format_.mBytesPerPacket;
- format_.mReserved = 0;
-
-  DVLOG(1) << "Desired output format: " << format_;
-
- // Set number of sample frames per callback used by the internal audio layer.
- // An internal FIFO is then utilized to adapt the internal size to the size
- // requested by the client.
- // Note that we use the same native buffer size as for the output side here
- // since the AUHAL implementation requires that both capture and render side
- // use the same buffer size. See http://crbug.com/154352 for more details.
- number_of_frames_ = GetAudioHardwareBufferSize();
- DVLOG(1) << "Size of data buffer in frames : " << number_of_frames_;
-
- // Derive size (in bytes) of the buffers that we will render to.
- UInt32 data_byte_size = number_of_frames_ * format_.mBytesPerFrame;
- DVLOG(1) << "Size of data buffer in bytes : " << data_byte_size;
-
- // Allocate AudioBuffers to be used as storage for the received audio.
- // The AudioBufferList structure works as a placeholder for the
- // AudioBuffer structure, which holds a pointer to the actual data buffer.
- audio_data_buffer_.reset(new uint8[data_byte_size]);
- audio_buffer_list_.mNumberBuffers = 1;
-
- AudioBuffer* audio_buffer = audio_buffer_list_.mBuffers;
- audio_buffer->mNumberChannels = params.channels();
- audio_buffer->mDataByteSize = data_byte_size;
- audio_buffer->mData = audio_data_buffer_.get();
-
- // Set up an internal FIFO buffer that will accumulate recorded audio frames
- // until a requested size is ready to be sent to the client.
-  // It is not possible to ask for fewer than |kAudioFramesPerCallback| audio
-  // frames.
- const size_t requested_size_frames =
- params.GetBytesPerBuffer() / format_.mBytesPerPacket;
- DCHECK_GE(requested_size_frames, number_of_frames_);
- requested_size_bytes_ = requested_size_frames * format_.mBytesPerFrame;
- DVLOG(1) << "Requested buffer size in bytes : " << requested_size_bytes_;
- DLOG_IF(INFO, requested_size_frames > number_of_frames_) << "FIFO is used";
-
- // Allocate some extra memory to avoid memory reallocations.
-  // Ensure that the size is an even multiple of |number_of_frames_| and
- // larger than |requested_size_frames|.
- // Example: number_of_frames_=128, requested_size_frames=480 =>
- // allocated space equals 4*128=512 audio frames
- const int max_forward_capacity = format_.mBytesPerFrame * number_of_frames_ *
- ((requested_size_frames / number_of_frames_) + 1);
- fifo_.reset(new media::SeekableBuffer(0, max_forward_capacity));
-
- data_ = new media::DataBuffer(requested_size_bytes_);
-}
-
-AUAudioInputStream::~AUAudioInputStream() {}
-
-// Obtain and open the AUHAL AudioOutputUnit for recording.
-bool AUAudioInputStream::Open() {
- // Verify that we are not already opened.
- if (audio_unit_)
- return false;
-
- // Verify that we have a valid device.
- if (input_device_id_ == kAudioObjectUnknown) {
- NOTREACHED() << "Device ID is unknown";
- return false;
- }
-
-  // Start by obtaining an AudioOutputUnit using an AUHAL component description.
-
- Component comp;
- ComponentDescription desc;
-
- // Description for the Audio Unit we want to use (AUHAL in this case).
- desc.componentType = kAudioUnitType_Output;
- desc.componentSubType = kAudioUnitSubType_HALOutput;
- desc.componentManufacturer = kAudioUnitManufacturer_Apple;
- desc.componentFlags = 0;
- desc.componentFlagsMask = 0;
- comp = FindNextComponent(0, &desc);
- DCHECK(comp);
-
- // Get access to the service provided by the specified Audio Unit.
- OSStatus result = OpenAComponent(comp, &audio_unit_);
- if (result) {
- HandleError(result);
- return false;
- }
-
- // Enable IO on the input scope of the Audio Unit.
-
- // After creating the AUHAL object, we must enable IO on the input scope
- // of the Audio Unit to obtain the device input. Input must be explicitly
- // enabled with the kAudioOutputUnitProperty_EnableIO property on Element 1
- // of the AUHAL. Because the AUHAL can be used for both input and output,
- // we must also disable IO on the output scope.
-
- UInt32 enableIO = 1;
-
- // Enable input on the AUHAL.
- result = AudioUnitSetProperty(audio_unit_,
- kAudioOutputUnitProperty_EnableIO,
- kAudioUnitScope_Input,
- 1, // input element 1
- &enableIO, // enable
- sizeof(enableIO));
- if (result) {
- HandleError(result);
- return false;
- }
-
- // Disable output on the AUHAL.
- enableIO = 0;
- result = AudioUnitSetProperty(audio_unit_,
- kAudioOutputUnitProperty_EnableIO,
- kAudioUnitScope_Output,
- 0, // output element 0
- &enableIO, // disable
- sizeof(enableIO));
- if (result) {
- HandleError(result);
- return false;
- }
-
- // Next, set the audio device to be the Audio Unit's current device.
- // Note that devices can only be set to the AUHAL after enabling IO.
- result = AudioUnitSetProperty(audio_unit_,
- kAudioOutputUnitProperty_CurrentDevice,
- kAudioUnitScope_Global,
- 0,
- &input_device_id_,
- sizeof(input_device_id_));
- if (result) {
- HandleError(result);
- return false;
- }
-
- // Register the input procedure for the AUHAL.
- // This procedure will be called when the AUHAL has received new data
- // from the input device.
- AURenderCallbackStruct callback;
- callback.inputProc = InputProc;
- callback.inputProcRefCon = this;
- result = AudioUnitSetProperty(audio_unit_,
- kAudioOutputUnitProperty_SetInputCallback,
- kAudioUnitScope_Global,
- 0,
- &callback,
- sizeof(callback));
- if (result) {
- HandleError(result);
- return false;
- }
-
- // Set up the desired (output) format.
- // For obtaining input from a device, the device format is always expressed
- // on the output scope of the AUHAL's Element 1.
- result = AudioUnitSetProperty(audio_unit_,
- kAudioUnitProperty_StreamFormat,
- kAudioUnitScope_Output,
- 1,
- &format_,
- sizeof(format_));
- if (result) {
- HandleError(result);
- return false;
- }
-
- // Set the desired number of frames in the IO buffer (output scope).
- // WARNING: Setting this value changes the frame size for all audio units in
- // the current process. It's imperative that the input and output frame sizes
- // be the same as audio_util::GetAudioHardwareBufferSize().
- // TODO(henrika): Due to http://crrev.com/159666 this is currently not true
- // and should be fixed; a CHECK() should be added at that time.
- result = AudioUnitSetProperty(audio_unit_,
- kAudioDevicePropertyBufferFrameSize,
- kAudioUnitScope_Output,
- 1,
- &number_of_frames_, // size is set in the ctor
- sizeof(number_of_frames_));
- if (result) {
- HandleError(result);
- return false;
- }
-
- // Finally, initialize the audio unit and ensure that it is ready to render.
- // Allocates memory according to the maximum number of audio frames
- // it can produce in response to a single render call.
- result = AudioUnitInitialize(audio_unit_);
- if (result) {
- HandleError(result);
- return false;
- }
-
- // The hardware latency is fixed and will not change during the call.
- hardware_latency_frames_ = GetHardwareLatency();
-
- // The master channel is 0; left and right are channels 1 and 2.
- // The master channel is not counted in |number_of_channels_in_frame_|.
- number_of_channels_in_frame_ = GetNumberOfChannelsFromStream();
-
- return true;
-}
-
-void AUAudioInputStream::Start(AudioInputCallback* callback) {
- DCHECK(callback);
- DLOG_IF(ERROR, !audio_unit_) << "Open() has not been called successfully";
- if (started_ || !audio_unit_)
- return;
- sink_ = callback;
- OSStatus result = AudioOutputUnitStart(audio_unit_);
- if (result == noErr) {
- started_ = true;
- }
- OSSTATUS_DLOG_IF(ERROR, result != noErr, result)
- << "Failed to start acquiring data";
-}
-
-void AUAudioInputStream::Stop() {
- if (!started_)
- return;
- OSStatus result = AudioOutputUnitStop(audio_unit_);
- if (result == noErr) {
- started_ = false;
- }
- OSSTATUS_DLOG_IF(ERROR, result != noErr, result)
- << "Failed to stop acquiring data";
-}
-
-void AUAudioInputStream::Close() {
- // It is valid to call Close() before calling Open() or Start().
- // It is also valid to call Close() after Start() has been called.
- if (started_) {
- Stop();
- }
- if (audio_unit_) {
- // Deallocate the audio unit's resources.
- AudioUnitUninitialize(audio_unit_);
-
- // Terminates our connection to the AUHAL component.
- CloseComponent(audio_unit_);
- audio_unit_ = 0;
- }
- if (sink_) {
- sink_->OnClose(this);
- sink_ = NULL;
- }
-
- // Inform the audio manager that we have been closed. This can cause our
- // destruction.
- manager_->ReleaseInputStream(this);
-}
-
-double AUAudioInputStream::GetMaxVolume() {
- // Verify that we have a valid device.
- if (input_device_id_ == kAudioObjectUnknown) {
- NOTREACHED() << "Device ID is unknown";
- return 0.0;
- }
-
- // Query if any of the master, left or right channels has volume control.
- for (int i = 0; i <= number_of_channels_in_frame_; ++i) {
- // If the volume is settable, the valid volume range is [0.0, 1.0].
- if (IsVolumeSettableOnChannel(i))
- return 1.0;
- }
-
- // Volume control is not available for the audio stream.
- return 0.0;
-}
-
-void AUAudioInputStream::SetVolume(double volume) {
- DVLOG(1) << "SetVolume(volume=" << volume << ")";
- DCHECK_GE(volume, 0.0);
- DCHECK_LE(volume, 1.0);
-
- // Verify that we have a valid device.
- if (input_device_id_ == kAudioObjectUnknown) {
- NOTREACHED() << "Device ID is unknown";
- return;
- }
-
- Float32 volume_float32 = static_cast<Float32>(volume);
- AudioObjectPropertyAddress property_address = {
- kAudioDevicePropertyVolumeScalar,
- kAudioDevicePropertyScopeInput,
- kAudioObjectPropertyElementMaster
- };
-
- // Try to set the volume for master volume channel.
- if (IsVolumeSettableOnChannel(kAudioObjectPropertyElementMaster)) {
- OSStatus result = AudioObjectSetPropertyData(input_device_id_,
- &property_address,
- 0,
- NULL,
- sizeof(volume_float32),
- &volume_float32);
- if (result != noErr) {
- DLOG(WARNING) << "Failed to set volume to " << volume_float32;
- }
- return;
- }
-
- // There is no master volume control, try to set volume for each channel.
- int successful_channels = 0;
- for (int i = 1; i <= number_of_channels_in_frame_; ++i) {
- property_address.mElement = static_cast<UInt32>(i);
- if (IsVolumeSettableOnChannel(i)) {
- OSStatus result = AudioObjectSetPropertyData(input_device_id_,
- &property_address,
- 0,
- NULL,
- sizeof(volume_float32),
- &volume_float32);
- if (result == noErr)
- ++successful_channels;
- }
- }
-
- DLOG_IF(WARNING, successful_channels == 0)
- << "Failed to set volume to " << volume_float32;
-
- // Update the AGC volume level based on the last setting above. Note that
- // the volume-level resolution is not infinite and it is therefore not
- // possible to assume that the volume provided as input parameter can be
- // used directly. Instead, a new query to the audio hardware is required.
- // This method does nothing if AGC is disabled.
- UpdateAgcVolume();
-}
-
-double AUAudioInputStream::GetVolume() {
- // Verify that we have a valid device.
- if (input_device_id_ == kAudioObjectUnknown) {
- NOTREACHED() << "Device ID is unknown";
- return 0.0;
- }
-
- AudioObjectPropertyAddress property_address = {
- kAudioDevicePropertyVolumeScalar,
- kAudioDevicePropertyScopeInput,
- kAudioObjectPropertyElementMaster
- };
-
- if (AudioObjectHasProperty(input_device_id_, &property_address)) {
- // The device supports master volume control, get the volume from the
- // master channel.
- Float32 volume_float32 = 0.0;
- UInt32 size = sizeof(volume_float32);
- OSStatus result = AudioObjectGetPropertyData(input_device_id_,
- &property_address,
- 0,
- NULL,
- &size,
- &volume_float32);
- if (result == noErr)
- return static_cast<double>(volume_float32);
- } else {
- // There is no master volume control, try to get the average volume of
- // all the channels.
- Float32 volume_float32 = 0.0;
- int successful_channels = 0;
- for (int i = 1; i <= number_of_channels_in_frame_; ++i) {
- property_address.mElement = static_cast<UInt32>(i);
- if (AudioObjectHasProperty(input_device_id_, &property_address)) {
- Float32 channel_volume = 0;
- UInt32 size = sizeof(channel_volume);
- OSStatus result = AudioObjectGetPropertyData(input_device_id_,
- &property_address,
- 0,
- NULL,
- &size,
- &channel_volume);
- if (result == noErr) {
- volume_float32 += channel_volume;
- ++successful_channels;
- }
- }
- }
-
- // Get the average volume of the channels.
- if (successful_channels != 0)
- return static_cast<double>(volume_float32 / successful_channels);
- }
-
- DLOG(WARNING) << "Failed to get volume";
- return 0.0;
-}
-
-// AUHAL AudioDeviceOutput unit callback
-OSStatus AUAudioInputStream::InputProc(void* user_data,
- AudioUnitRenderActionFlags* flags,
- const AudioTimeStamp* time_stamp,
- UInt32 bus_number,
- UInt32 number_of_frames,
- AudioBufferList* io_data) {
- // Verify that the correct bus is used (Input bus/Element 1)
- DCHECK_EQ(bus_number, static_cast<UInt32>(1));
- AUAudioInputStream* audio_input =
- reinterpret_cast<AUAudioInputStream*>(user_data);
- DCHECK(audio_input);
- if (!audio_input)
- return kAudioUnitErr_InvalidElement;
-
- // Receive audio from the AUHAL from the output scope of the Audio Unit.
- OSStatus result = AudioUnitRender(audio_input->audio_unit(),
- flags,
- time_stamp,
- bus_number,
- number_of_frames,
- audio_input->audio_buffer_list());
- if (result)
- return result;
-
- // Deliver recorded data to the consumer as a callback.
- return audio_input->Provide(number_of_frames,
- audio_input->audio_buffer_list(),
- time_stamp);
-}
-
-OSStatus AUAudioInputStream::Provide(UInt32 number_of_frames,
- AudioBufferList* io_data,
- const AudioTimeStamp* time_stamp) {
- // Update the capture latency.
- double capture_latency_frames = GetCaptureLatency(time_stamp);
-
- // Update the AGC volume level once every second. Note that |volume| is
- // also updated each time SetVolume() is called through IPC by the
- // render-side AGC.
- double normalized_volume = 0.0;
- QueryAgcVolume(&normalized_volume);
-
- AudioBuffer& buffer = io_data->mBuffers[0];
- uint8* audio_data = reinterpret_cast<uint8*>(buffer.mData);
- uint32 capture_delay_bytes = static_cast<uint32>
- ((capture_latency_frames + 0.5) * format_.mBytesPerFrame);
- DCHECK(audio_data);
- if (!audio_data)
- return kAudioUnitErr_InvalidElement;
-
- // See http://crbug.com/154352 for details.
- CHECK_EQ(number_of_frames, static_cast<UInt32>(number_of_frames_));
-
- // Accumulate captured audio in FIFO until we can match the output size
- // requested by the client.
- DCHECK_LE(fifo_->forward_bytes(), requested_size_bytes_);
- fifo_->Append(audio_data, buffer.mDataByteSize);
-
- // Deliver recorded data to the client as soon as the FIFO contains a
- // sufficient amount.
- if (fifo_->forward_bytes() >= requested_size_bytes_) {
- // Read from FIFO into temporary data buffer.
- fifo_->Read(data_->GetWritableData(), requested_size_bytes_);
-
- // Deliver data packet, delay estimation and volume level to the user.
- sink_->OnData(this,
- data_->GetData(),
- requested_size_bytes_,
- capture_delay_bytes,
- normalized_volume);
- }
-
- return noErr;
-}
-
-int AUAudioInputStream::HardwareSampleRate() {
- // Determine the default input device's sample-rate.
- AudioDeviceID device_id = kAudioObjectUnknown;
- UInt32 info_size = sizeof(device_id);
-
- AudioObjectPropertyAddress default_input_device_address = {
- kAudioHardwarePropertyDefaultInputDevice,
- kAudioObjectPropertyScopeGlobal,
- kAudioObjectPropertyElementMaster
- };
- OSStatus result = AudioObjectGetPropertyData(kAudioObjectSystemObject,
- &default_input_device_address,
- 0,
- 0,
- &info_size,
- &device_id);
- if (result != noErr)
- return 0;
-
- Float64 nominal_sample_rate;
- info_size = sizeof(nominal_sample_rate);
-
- AudioObjectPropertyAddress nominal_sample_rate_address = {
- kAudioDevicePropertyNominalSampleRate,
- kAudioObjectPropertyScopeGlobal,
- kAudioObjectPropertyElementMaster
- };
- result = AudioObjectGetPropertyData(device_id,
- &nominal_sample_rate_address,
- 0,
- 0,
- &info_size,
- &nominal_sample_rate);
- if (result != noErr)
- return 0;
-
- return static_cast<int>(nominal_sample_rate);
-}
-
-double AUAudioInputStream::GetHardwareLatency() {
- if (!audio_unit_ || input_device_id_ == kAudioObjectUnknown) {
- DLOG(WARNING) << "Audio unit object is NULL or device ID is unknown";
- return 0.0;
- }
-
- // Get audio unit latency.
- Float64 audio_unit_latency_sec = 0.0;
- UInt32 size = sizeof(audio_unit_latency_sec);
- OSStatus result = AudioUnitGetProperty(audio_unit_,
- kAudioUnitProperty_Latency,
- kAudioUnitScope_Global,
- 0,
- &audio_unit_latency_sec,
- &size);
- OSSTATUS_DLOG_IF(WARNING, result != noErr, result)
- << "Could not get audio unit latency";
-
- // Get input audio device latency.
- AudioObjectPropertyAddress property_address = {
- kAudioDevicePropertyLatency,
- kAudioDevicePropertyScopeInput,
- kAudioObjectPropertyElementMaster
- };
- UInt32 device_latency_frames = 0;
- size = sizeof(device_latency_frames);
- result = AudioObjectGetPropertyData(input_device_id_,
- &property_address,
- 0,
- NULL,
- &size,
- &device_latency_frames);
- DLOG_IF(WARNING, result != noErr) << "Could not get audio device latency.";
-
- return static_cast<double>((audio_unit_latency_sec *
- format_.mSampleRate) + device_latency_frames);
-}
-
-double AUAudioInputStream::GetCaptureLatency(
- const AudioTimeStamp* input_time_stamp) {
- // Get the delay between the actual recording instant and the time
- // when the data packet is provided as a callback.
- UInt64 capture_time_ns = AudioConvertHostTimeToNanos(
- input_time_stamp->mHostTime);
- UInt64 now_ns = AudioConvertHostTimeToNanos(AudioGetCurrentHostTime());
- double delay_frames = static_cast<double>
- (1e-9 * (now_ns - capture_time_ns) * format_.mSampleRate);
-
- // Total latency is composed by the dynamic latency and the fixed
- // hardware latency.
- return (delay_frames + hardware_latency_frames_);
-}
-
-int AUAudioInputStream::GetNumberOfChannelsFromStream() {
- // Get the stream format, to be able to read the number of channels.
- AudioObjectPropertyAddress property_address = {
- kAudioDevicePropertyStreamFormat,
- kAudioDevicePropertyScopeInput,
- kAudioObjectPropertyElementMaster
- };
- AudioStreamBasicDescription stream_format;
- UInt32 size = sizeof(stream_format);
- OSStatus result = AudioObjectGetPropertyData(input_device_id_,
- &property_address,
- 0,
- NULL,
- &size,
- &stream_format);
- if (result != noErr) {
- DLOG(WARNING) << "Could not get stream format";
- return 0;
- }
-
- return static_cast<int>(stream_format.mChannelsPerFrame);
-}
-
-void AUAudioInputStream::HandleError(OSStatus err) {
- NOTREACHED() << "error " << GetMacOSStatusErrorString(err)
- << " (" << err << ")";
- if (sink_)
- sink_->OnError(this, static_cast<int>(err));
-}
-
-bool AUAudioInputStream::IsVolumeSettableOnChannel(int channel) {
- Boolean is_settable = false;
- AudioObjectPropertyAddress property_address = {
- kAudioDevicePropertyVolumeScalar,
- kAudioDevicePropertyScopeInput,
- static_cast<UInt32>(channel)
- };
- OSStatus result = AudioObjectIsPropertySettable(input_device_id_,
- &property_address,
- &is_settable);
- return (result == noErr) ? is_settable : false;
-}
-
-} // namespace media
diff --git a/src/media/audio/mac/audio_low_latency_input_mac.h b/src/media/audio/mac/audio_low_latency_input_mac.h
deleted file mode 100644
index 0c6edc0..0000000
--- a/src/media/audio/mac/audio_low_latency_input_mac.h
+++ /dev/null
@@ -1,166 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-//
-// Implementation of AudioInputStream for Mac OS X using the special AUHAL
-// input Audio Unit present in OS 10.4 and later.
-// The AUHAL input Audio Unit is for low-latency audio I/O.
-//
-// Overview of operation:
-//
-// - An object of AUAudioInputStream is created by the AudioManager
-// factory: audio_man->MakeAudioInputStream().
- // - Next some thread will call Open(); at that point the underlying
-// AUHAL output Audio Unit is created and configured.
-// - Then some thread will call Start(sink).
-// Then the Audio Unit is started which creates its own thread which
-// periodically will provide the sink with more data as buffers are being
-// produced/recorded.
-// - At some point some thread will call Stop(), which we handle by directly
-// stopping the AUHAL output Audio Unit.
- // - The same thread that called Stop() will call Close(), where we clean up
-// and notify the audio manager, which likely will destroy this object.
-//
-// Implementation notes:
-//
-// - It is recommended to first acquire the native sample rate of the default
-// input device and then use the same rate when creating this object.
-// Use AUAudioInputStream::HardwareSampleRate() to retrieve the sample rate.
-// - Calling Close() also leads to self destruction.
-// - The latency consists of two parts:
- // 1) Hardware latency, which includes Audio Unit latency and audio device
-// latency;
-// 2) The delay between the actual recording instant and the time when the
-// data packet is provided as a callback.
-//
-#ifndef MEDIA_AUDIO_MAC_AUDIO_LOW_LATENCY_INPUT_MAC_H_
-#define MEDIA_AUDIO_MAC_AUDIO_LOW_LATENCY_INPUT_MAC_H_
-
-#include <AudioUnit/AudioUnit.h>
-#include <CoreAudio/CoreAudio.h>
-
-#include "base/atomicops.h"
-#include "base/memory/scoped_ptr.h"
-#include "base/synchronization/lock.h"
-#include "media/audio/audio_io.h"
-#include "media/audio/audio_input_stream_impl.h"
-#include "media/audio/audio_parameters.h"
-#include "media/base/seekable_buffer.h"
-
-namespace media {
-
-class AudioManagerMac;
-class DataBuffer;
-
-class AUAudioInputStream : public AudioInputStreamImpl {
- public:
- // The ctor takes all the usual parameters, plus |manager| which is the
- // audio manager that is creating this object.
- AUAudioInputStream(AudioManagerMac* manager,
- const AudioParameters& params,
- AudioDeviceID audio_device_id);
- // The dtor is typically called by the AudioManager only and it is usually
- // triggered by calling AudioInputStream::Close().
- virtual ~AUAudioInputStream();
-
- // Implementation of AudioInputStream.
- virtual bool Open() OVERRIDE;
- virtual void Start(AudioInputCallback* callback) OVERRIDE;
- virtual void Stop() OVERRIDE;
- virtual void Close() OVERRIDE;
- virtual double GetMaxVolume() OVERRIDE;
- virtual void SetVolume(double volume) OVERRIDE;
- virtual double GetVolume() OVERRIDE;
-
- // Returns the current hardware sample rate for the default input device.
- MEDIA_EXPORT static int HardwareSampleRate();
-
- bool started() const { return started_; }
- AudioUnit audio_unit() { return audio_unit_; }
- AudioBufferList* audio_buffer_list() { return &audio_buffer_list_; }
-
- private:
- // AudioOutputUnit callback.
- static OSStatus InputProc(void* user_data,
- AudioUnitRenderActionFlags* flags,
- const AudioTimeStamp* time_stamp,
- UInt32 bus_number,
- UInt32 number_of_frames,
- AudioBufferList* io_data);
-
- // Pushes recorded data to consumer of the input audio stream.
- OSStatus Provide(UInt32 number_of_frames, AudioBufferList* io_data,
- const AudioTimeStamp* time_stamp);
-
- // Gets the fixed capture hardware latency and stores it during initialization.
- // Returns 0 if not available.
- double GetHardwareLatency();
-
- // Gets the current capture delay value.
- double GetCaptureLatency(const AudioTimeStamp* input_time_stamp);
-
- // Gets the number of channels for a stream of audio data.
- int GetNumberOfChannelsFromStream();
-
- // Issues the OnError() callback to the |sink_|.
- void HandleError(OSStatus err);
-
- // Helper function to check if the volume control is available on a specific
- // channel.
- bool IsVolumeSettableOnChannel(int channel);
-
- // Our creator, the audio manager needs to be notified when we close.
- AudioManagerMac* manager_;
-
- // Contains the desired number of audio frames in each callback.
- size_t number_of_frames_;
-
- // Pointer to the object that will receive the recorded audio samples.
- AudioInputCallback* sink_;
-
- // Structure that holds the desired output format of the stream.
- // Note that this format can differ from the device(=input) format.
- AudioStreamBasicDescription format_;
-
- // The special Audio Unit called AUHAL, which allows us to pass audio data
- // directly from a microphone, through the HAL, and to our application.
- // The AUHAL also enables selection of non-default devices.
- AudioUnit audio_unit_;
-
- // The UID refers to the current input audio device.
- AudioDeviceID input_device_id_;
-
- // Provides a mechanism for encapsulating one or more buffers of audio data.
- AudioBufferList audio_buffer_list_;
-
- // Temporary storage for recorded data. The InputProc() renders into this
- // array as soon as a frame of the desired buffer size has been recorded.
- scoped_array<uint8> audio_data_buffer_;
-
- // True after a successful Start(), false after a successful Stop().
- bool started_;
-
- // Fixed capture hardware latency in frames.
- double hardware_latency_frames_;
-
- // The number of channels in each frame of audio data, which is used
- // when querying the volume of each channel.
- int number_of_channels_in_frame_;
-
- // Accumulates recorded data packets until the requested size has been stored.
- scoped_ptr<media::SeekableBuffer> fifo_;
-
- // Intermediate storage of data from the FIFO before sending it to the
- // client using the OnData() callback.
- scoped_refptr<media::DataBuffer> data_;
-
- // The client requests that the recorded data shall be delivered using
- // OnData() callbacks where each callback contains this amount of bytes.
- int requested_size_bytes_;
-
- DISALLOW_COPY_AND_ASSIGN(AUAudioInputStream);
-};
-
-} // namespace media
-
-#endif // MEDIA_AUDIO_MAC_AUDIO_LOW_LATENCY_INPUT_MAC_H_
diff --git a/src/media/audio/mac/audio_low_latency_input_mac_unittest.cc b/src/media/audio/mac/audio_low_latency_input_mac_unittest.cc
deleted file mode 100644
index e8ef33d..0000000
--- a/src/media/audio/mac/audio_low_latency_input_mac_unittest.cc
+++ /dev/null
@@ -1,317 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/basictypes.h"
-#include "base/environment.h"
-#include "base/message_loop.h"
-#include "base/test/test_timeouts.h"
-#include "base/threading/platform_thread.h"
-#include "media/audio/audio_io.h"
-#include "media/audio/audio_manager_base.h"
-#include "media/audio/mac/audio_low_latency_input_mac.h"
-#include "media/base/seekable_buffer.h"
-#include "testing/gmock/include/gmock/gmock.h"
-#include "testing/gtest/include/gtest/gtest.h"
-
-using ::testing::_;
-using ::testing::AnyNumber;
-using ::testing::AtLeast;
-using ::testing::Ge;
-using ::testing::NotNull;
-
-namespace media {
-
-ACTION_P3(CheckCountAndPostQuitTask, count, limit, loop) {
- if (++*count >= limit) {
- loop->PostTask(FROM_HERE, MessageLoop::QuitClosure());
- }
-}
-
-class MockAudioInputCallback : public AudioInputStream::AudioInputCallback {
- public:
- MOCK_METHOD5(OnData, void(AudioInputStream* stream,
- const uint8* src, uint32 size,
- uint32 hardware_delay_bytes, double volume));
- MOCK_METHOD1(OnClose, void(AudioInputStream* stream));
- MOCK_METHOD2(OnError, void(AudioInputStream* stream, int code));
-};
-
-// This audio sink implementation should be used for manual tests only since
- // the recorded data is stored in a raw binary data file.
-// The last test (WriteToFileAudioSink) - which is disabled by default -
- // can use this audio sink to store the captured data in a file for offline
-// analysis.
-class WriteToFileAudioSink : public AudioInputStream::AudioInputCallback {
- public:
- // Allocate space for ~10 seconds of data @ 48kHz in stereo:
- // 2 bytes per sample, 2 channels, 10ms @ 48kHz, 10 seconds <=> 1920000 bytes.
- static const int kMaxBufferSize = 2 * 2 * 480 * 100 * 10;
-
- explicit WriteToFileAudioSink(const char* file_name)
- : buffer_(0, kMaxBufferSize),
- file_(fopen(file_name, "wb")),
- bytes_to_write_(0) {
- }
-
- virtual ~WriteToFileAudioSink() {
- int bytes_written = 0;
- while (bytes_written < bytes_to_write_) {
- const uint8* chunk;
- int chunk_size;
-
- // Stop writing if no more data is available.
- if (!buffer_.GetCurrentChunk(&chunk, &chunk_size))
- break;
-
- // Write recorded data chunk to the file and prepare for next chunk.
- fwrite(chunk, 1, chunk_size, file_);
- buffer_.Seek(chunk_size);
- bytes_written += chunk_size;
- }
- fclose(file_);
- }
-
- // AudioInputStream::AudioInputCallback implementation.
- virtual void OnData(AudioInputStream* stream,
- const uint8* src, uint32 size,
- uint32 hardware_delay_bytes, double volume) {
- // Store data in a temporary buffer to avoid making blocking
- // fwrite() calls in the audio callback. The complete buffer will be
- // written to file in the destructor.
- if (buffer_.Append(src, size)) {
- bytes_to_write_ += size;
- }
- }
-
- virtual void OnClose(AudioInputStream* stream) {}
- virtual void OnError(AudioInputStream* stream, int code) {}
-
- private:
- media::SeekableBuffer buffer_;
- FILE* file_;
- int bytes_to_write_;
-};
-
-class MacAudioInputTest : public testing::Test {
- protected:
- MacAudioInputTest() : audio_manager_(AudioManager::Create()) {}
- virtual ~MacAudioInputTest() {}
-
- // Convenience method which ensures that at least one valid input device
- // can be found before the audio tests are run.
- bool CanRunAudioTests() {
- bool has_input = audio_manager_->HasAudioInputDevices();
- if (!has_input)
- LOG(WARNING) << "No input devices detected";
- return has_input;
- }
-
- // Convenience method which creates a default AudioInputStream object using
- // a 10ms frame size and a sample rate which is set to the hardware sample
- // rate.
- AudioInputStream* CreateDefaultAudioInputStream() {
- int fs = static_cast<int>(AUAudioInputStream::HardwareSampleRate());
- int samples_per_packet = fs / 100;
- AudioInputStream* ais = audio_manager_->MakeAudioInputStream(
- AudioParameters(AudioParameters::AUDIO_PCM_LOW_LATENCY,
- CHANNEL_LAYOUT_STEREO, fs, 16, samples_per_packet),
- AudioManagerBase::kDefaultDeviceId);
- EXPECT_TRUE(ais);
- return ais;
- }
-
- // Convenience method which creates an AudioInputStream object with a
- // specified channel layout.
- AudioInputStream* CreateAudioInputStream(ChannelLayout channel_layout) {
- int fs = static_cast<int>(AUAudioInputStream::HardwareSampleRate());
- int samples_per_packet = fs / 100;
- AudioInputStream* ais = audio_manager_->MakeAudioInputStream(
- AudioParameters(AudioParameters::AUDIO_PCM_LOW_LATENCY,
- channel_layout, fs, 16, samples_per_packet),
- AudioManagerBase::kDefaultDeviceId);
- EXPECT_TRUE(ais);
- return ais;
- }
-
- scoped_ptr<AudioManager> audio_manager_;
-};
-
-// Test Create(), Close().
-TEST_F(MacAudioInputTest, AUAudioInputStreamCreateAndClose) {
- if (!CanRunAudioTests())
- return;
- AudioInputStream* ais = CreateDefaultAudioInputStream();
- ais->Close();
-}
-
-// Test Open(), Close().
-TEST_F(MacAudioInputTest, AUAudioInputStreamOpenAndClose) {
- if (!CanRunAudioTests())
- return;
- AudioInputStream* ais = CreateDefaultAudioInputStream();
- EXPECT_TRUE(ais->Open());
- ais->Close();
-}
-
-// Test Open(), Start(), Close().
-TEST_F(MacAudioInputTest, AUAudioInputStreamOpenStartAndClose) {
- if (!CanRunAudioTests())
- return;
- AudioInputStream* ais = CreateDefaultAudioInputStream();
- EXPECT_TRUE(ais->Open());
- MockAudioInputCallback sink;
- ais->Start(&sink);
- EXPECT_CALL(sink, OnClose(ais))
- .Times(1);
- ais->Close();
-}
-
-// Test Open(), Start(), Stop(), Close().
-TEST_F(MacAudioInputTest, AUAudioInputStreamOpenStartStopAndClose) {
- if (!CanRunAudioTests())
- return;
- AudioInputStream* ais = CreateDefaultAudioInputStream();
- EXPECT_TRUE(ais->Open());
- MockAudioInputCallback sink;
- ais->Start(&sink);
- ais->Stop();
- EXPECT_CALL(sink, OnClose(ais))
- .Times(1);
- ais->Close();
-}
-
-// Test some additional calling sequences.
-TEST_F(MacAudioInputTest, AUAudioInputStreamMiscCallingSequences) {
- if (!CanRunAudioTests())
- return;
- AudioInputStream* ais = CreateDefaultAudioInputStream();
- AUAudioInputStream* auais = static_cast<AUAudioInputStream*>(ais);
-
- // Open(), Open() should fail the second time.
- EXPECT_TRUE(ais->Open());
- EXPECT_FALSE(ais->Open());
-
- MockAudioInputCallback sink;
-
- // Start(), Start() is a valid calling sequence (second call does nothing).
- ais->Start(&sink);
- EXPECT_TRUE(auais->started());
- ais->Start(&sink);
- EXPECT_TRUE(auais->started());
-
- // Stop(), Stop() is a valid calling sequence (second call does nothing).
- ais->Stop();
- EXPECT_FALSE(auais->started());
- ais->Stop();
- EXPECT_FALSE(auais->started());
-
- EXPECT_CALL(sink, OnClose(ais))
- .Times(1);
- ais->Close();
-}
-
- // Verify that recording starts and stops correctly in mono using a mocked sink.
-TEST_F(MacAudioInputTest, AUAudioInputStreamVerifyMonoRecording) {
- if (!CanRunAudioTests())
- return;
-
- int count = 0;
- MessageLoopForUI loop;
-
- // Create an audio input stream which records in mono.
- AudioInputStream* ais = CreateAudioInputStream(CHANNEL_LAYOUT_MONO);
- EXPECT_TRUE(ais->Open());
-
- int fs = static_cast<int>(AUAudioInputStream::HardwareSampleRate());
- int samples_per_packet = fs / 100;
- int bits_per_sample = 16;
- uint32 bytes_per_packet = samples_per_packet * (bits_per_sample / 8);
-
- MockAudioInputCallback sink;
-
- // We use 10ms packets and will run the test until ten packets are received.
- // All should contain valid packets of the same size and a valid delay
- // estimate.
- EXPECT_CALL(sink, OnData(ais, NotNull(), bytes_per_packet, _, _))
- .Times(AtLeast(10))
- .WillRepeatedly(CheckCountAndPostQuitTask(&count, 10, &loop));
- ais->Start(&sink);
- loop.Run();
- ais->Stop();
-
- // Verify that the sink receives an OnClose() call when calling Close().
- EXPECT_CALL(sink, OnClose(ais))
- .Times(1);
- ais->Close();
-}
-
- // Verify that recording starts and stops correctly in stereo using a mocked sink.
-TEST_F(MacAudioInputTest, AUAudioInputStreamVerifyStereoRecording) {
- if (!CanRunAudioTests())
- return;
-
- int count = 0;
- MessageLoopForUI loop;
-
- // Create an audio input stream which records in stereo.
- AudioInputStream* ais = CreateAudioInputStream(CHANNEL_LAYOUT_STEREO);
- EXPECT_TRUE(ais->Open());
-
- int fs = static_cast<int>(AUAudioInputStream::HardwareSampleRate());
- int samples_per_packet = fs / 100;
- int bits_per_sample = 16;
- uint32 bytes_per_packet = 2 * samples_per_packet * (bits_per_sample / 8);
-
- MockAudioInputCallback sink;
-
- // We use 10ms packets and will run the test until ten packets are received.
- // All should contain valid packets of the same size and a valid delay
- // estimate.
- // TODO(henrika): http://crbug.com/154352 forced us to run the capture side
- // using a native buffer size of 128 audio frames and combine it with a FIFO
- // to match the requested size by the client. This change might also have
- // modified the delay estimates since the existing Ge(bytes_per_packet) for
- // parameter #4 no longer passes. I am removing this restriction here to
- // ensure that we can land the patch but will revisit this test again when
- // more analysis of the delay estimates is done.
- EXPECT_CALL(sink, OnData(ais, NotNull(), bytes_per_packet, _, _))
- .Times(AtLeast(10))
- .WillRepeatedly(CheckCountAndPostQuitTask(&count, 10, &loop));
- ais->Start(&sink);
- loop.Run();
- ais->Stop();
-
- // Verify that the sink receives an OnClose() call when calling Close().
- EXPECT_CALL(sink, OnClose(ais))
- .Times(1);
- ais->Close();
-}
-
-// This test is intended for manual tests and should only be enabled
-// when it is required to store the captured data on a local file.
-// By default, GTest will print out YOU HAVE 1 DISABLED TEST.
-// To include disabled tests in test execution, just invoke the test program
-// with --gtest_also_run_disabled_tests or set the GTEST_ALSO_RUN_DISABLED_TESTS
-// environment variable to a value greater than 0.
-TEST_F(MacAudioInputTest, DISABLED_AUAudioInputStreamRecordToFile) {
- if (!CanRunAudioTests())
- return;
- const char* file_name = "out_stereo_10sec.pcm";
-
- int fs = static_cast<int>(AUAudioInputStream::HardwareSampleRate());
- AudioInputStream* ais = CreateDefaultAudioInputStream();
- EXPECT_TRUE(ais->Open());
-
- fprintf(stderr, " File name : %s\n", file_name);
- fprintf(stderr, " Sample rate: %d\n", fs);
- WriteToFileAudioSink file_sink(file_name);
- fprintf(stderr, " >> Speak into the mic while recording...\n");
- ais->Start(&file_sink);
- base::PlatformThread::Sleep(TestTimeouts::action_timeout());
- ais->Stop();
- fprintf(stderr, " >> Recording has stopped.\n");
- ais->Close();
-}
-
-} // namespace media
diff --git a/src/media/audio/mac/audio_low_latency_output_mac.cc b/src/media/audio/mac/audio_low_latency_output_mac.cc
deleted file mode 100644
index 98182b0..0000000
--- a/src/media/audio/mac/audio_low_latency_output_mac.cc
+++ /dev/null
@@ -1,402 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "media/audio/mac/audio_low_latency_output_mac.h"
-
-#include <CoreServices/CoreServices.h>
-
-#include "base/basictypes.h"
-#include "base/command_line.h"
-#include "base/logging.h"
-#include "base/mac/mac_logging.h"
-#include "media/audio/audio_util.h"
-#include "media/audio/mac/audio_manager_mac.h"
-#include "media/base/media_switches.h"
-
-namespace media {
-
-static std::ostream& operator<<(std::ostream& os,
- const AudioStreamBasicDescription& format) {
- os << "sample rate : " << format.mSampleRate << std::endl
- << "format ID : " << format.mFormatID << std::endl
- << "format flags : " << format.mFormatFlags << std::endl
- << "bytes per packet : " << format.mBytesPerPacket << std::endl
- << "frames per packet : " << format.mFramesPerPacket << std::endl
- << "bytes per frame : " << format.mBytesPerFrame << std::endl
- << "channels per frame: " << format.mChannelsPerFrame << std::endl
- << "bits per channel : " << format.mBitsPerChannel;
- return os;
-}
-
-static AudioObjectPropertyAddress kDefaultOutputDeviceAddress = {
- kAudioHardwarePropertyDefaultOutputDevice,
- kAudioObjectPropertyScopeGlobal,
- kAudioObjectPropertyElementMaster
-};
-
-// Overview of operation:
-// 1) An object of AUAudioOutputStream is created by the AudioManager
-// factory: audio_man->MakeAudioStream().
- // 2) Next some thread will call Open(); at that point the underlying
-// default output Audio Unit is created and configured.
-// 3) Then some thread will call Start(source).
-// Then the Audio Unit is started which creates its own thread which
-// periodically will call the source for more data as buffers are being
-// consumed.
-// 4) At some point some thread will call Stop(), which we handle by directly
-// stopping the default output Audio Unit.
- // 5) The same thread that called Stop() will call Close(), where we clean up
-// and notify the audio manager, which likely will destroy this object.
-
-AUAudioOutputStream::AUAudioOutputStream(
- AudioManagerMac* manager, const AudioParameters& params)
- : manager_(manager),
- source_(NULL),
- output_unit_(0),
- output_device_id_(kAudioObjectUnknown),
- volume_(1),
- hardware_latency_frames_(0),
- stopped_(false),
- audio_bus_(AudioBus::Create(params)) {
- // We must have a manager.
- DCHECK(manager_);
-
- // A frame is one sample across all channels. In interleaved audio the per
- // frame fields identify the set of n |channels|. In uncompressed audio, a
- // packet is always one frame.
- format_.mSampleRate = params.sample_rate();
- format_.mFormatID = kAudioFormatLinearPCM;
- format_.mFormatFlags = kLinearPCMFormatFlagIsPacked |
- kLinearPCMFormatFlagIsSignedInteger;
- format_.mBitsPerChannel = params.bits_per_sample();
- format_.mChannelsPerFrame = params.channels();
- format_.mFramesPerPacket = 1;
- format_.mBytesPerPacket = (format_.mBitsPerChannel * params.channels()) / 8;
- format_.mBytesPerFrame = format_.mBytesPerPacket;
- format_.mReserved = 0;
-
- DVLOG(1) << "Desired ouput format: " << format_;
-
- // Calculate the number of sample frames per callback.
- number_of_frames_ = params.GetBytesPerBuffer() / format_.mBytesPerPacket;
- DVLOG(1) << "Number of frames per callback: " << number_of_frames_;
- CHECK_EQ(number_of_frames_, GetAudioHardwareBufferSize());
-}
-
-AUAudioOutputStream::~AUAudioOutputStream() {
-}
-
-bool AUAudioOutputStream::Open() {
- // Obtain the current input device selected by the user.
- UInt32 size = sizeof(output_device_id_);
- OSStatus result = AudioObjectGetPropertyData(kAudioObjectSystemObject,
- &kDefaultOutputDeviceAddress,
- 0,
- 0,
- &size,
- &output_device_id_);
- if (result != noErr || output_device_id_ == kAudioObjectUnknown) {
- OSSTATUS_DLOG(WARNING, result)
- << "Could not get default audio output device.";
- return false;
- }
-
- // Open and initialize the DefaultOutputUnit.
- AudioComponent comp;
- AudioComponentDescription desc;
-
- desc.componentType = kAudioUnitType_Output;
- desc.componentSubType = kAudioUnitSubType_DefaultOutput;
- desc.componentManufacturer = kAudioUnitManufacturer_Apple;
- desc.componentFlags = 0;
- desc.componentFlagsMask = 0;
- comp = AudioComponentFindNext(0, &desc);
- if (!comp)
- return false;
-
- result = AudioComponentInstanceNew(comp, &output_unit_);
- if (result != noErr) {
- OSSTATUS_DLOG(WARNING, result) << "AudioComponentInstanceNew() failed.";
- return false;
- }
-
- result = AudioUnitInitialize(output_unit_);
- if (result != noErr) {
- OSSTATUS_DLOG(WARNING, result) << "AudioUnitInitialize() failed.";
- return false;
- }
-
- hardware_latency_frames_ = GetHardwareLatency();
-
- return Configure();
-}
-
-bool AUAudioOutputStream::Configure() {
- // Set the render callback.
- AURenderCallbackStruct input;
- input.inputProc = InputProc;
- input.inputProcRefCon = this;
- OSStatus result = AudioUnitSetProperty(
- output_unit_,
- kAudioUnitProperty_SetRenderCallback,
- kAudioUnitScope_Global,
- 0,
- &input,
- sizeof(input));
- if (result != noErr) {
- OSSTATUS_DLOG(WARNING, result)
- << "AudioUnitSetProperty(kAudioUnitProperty_SetRenderCallback) failed.";
- return false;
- }
-
- // Set the stream format.
- result = AudioUnitSetProperty(
- output_unit_,
- kAudioUnitProperty_StreamFormat,
- kAudioUnitScope_Input,
- 0,
- &format_,
- sizeof(format_));
- if (result != noErr) {
- OSSTATUS_DLOG(WARNING, result)
- << "AudioUnitSetProperty(kAudioUnitProperty_StreamFormat) failed.";
- return false;
- }
-
- // Set the buffer frame size.
- // WARNING: Setting this value changes the frame size for all audio units in
- // the current process. It's imperative that the input and output frame sizes
- // be the same as audio_util::GetAudioHardwareBufferSize().
- // See http://crbug.com/154352 for details.
- UInt32 buffer_size = number_of_frames_;
- result = AudioUnitSetProperty(
- output_unit_,
- kAudioDevicePropertyBufferFrameSize,
- kAudioUnitScope_Output,
- 0,
- &buffer_size,
- sizeof(buffer_size));
- if (result != noErr) {
- OSSTATUS_DLOG(WARNING, result)
- << "AudioUnitSetProperty(kAudioDevicePropertyBufferFrameSize) failed.";
- return false;
- }
-
- return true;
-}
-
-void AUAudioOutputStream::Close() {
- if (output_unit_)
- AudioComponentInstanceDispose(output_unit_);
-
- // Inform the audio manager that we have been closed. This can cause our
- // destruction.
- manager_->ReleaseOutputStream(this);
-}
-
-void AUAudioOutputStream::Start(AudioSourceCallback* callback) {
- DCHECK(callback);
- if (!output_unit_) {
- DLOG(ERROR) << "Open() has not been called successfully";
- return;
- }
-
- stopped_ = false;
- source_ = callback;
-
- AudioOutputUnitStart(output_unit_);
-}
-
-void AUAudioOutputStream::Stop() {
- // We request a synchronous stop, so the next call can take some time. In
- // the Windows implementation we block here as well.
- if (stopped_)
- return;
-
- AudioOutputUnitStop(output_unit_);
-
- source_ = NULL;
- stopped_ = true;
-}
-
-void AUAudioOutputStream::SetVolume(double volume) {
- if (!output_unit_)
- return;
- volume_ = static_cast<float>(volume);
-
- // TODO(crogers): set volume property
-}
-
-void AUAudioOutputStream::GetVolume(double* volume) {
- if (!output_unit_)
- return;
- *volume = volume_;
-}
-
-// Pulls on our provider to get rendered audio stream.
-// Note to future hackers of this function: Do not add locks here because this
-// is running on a real-time thread (for low-latency).
-OSStatus AUAudioOutputStream::Render(UInt32 number_of_frames,
- AudioBufferList* io_data,
- const AudioTimeStamp* output_time_stamp) {
- // Update the playout latency.
- double playout_latency_frames = GetPlayoutLatency(output_time_stamp);
-
- AudioBuffer& buffer = io_data->mBuffers[0];
- uint8* audio_data = reinterpret_cast<uint8*>(buffer.mData);
- uint32 hardware_pending_bytes = static_cast<uint32>
- ((playout_latency_frames + 0.5) * format_.mBytesPerFrame);
-
- // Unfortunately AUAudioInputStream and AUAudioOutputStream share the frame
- // size set by kAudioDevicePropertyBufferFrameSize above on a per process
- // basis. What this means is that the |number_of_frames| value may be larger
- // or smaller than the value set during Configure(). In this case either
- // audio input or audio output will be broken, so just output silence.
- // TODO(crogers): Figure out what can trigger a change in |number_of_frames|.
- // See http://crbug.com/1543 for details.
- if (number_of_frames != static_cast<UInt32>(audio_bus_->frames())) {
- memset(audio_data, 0, number_of_frames * format_.mBytesPerFrame);
- return noErr;
- }
-
- int frames_filled = source_->OnMoreData(
- audio_bus_.get(), AudioBuffersState(0, hardware_pending_bytes));
-
- // Note: If this ever changes to output raw float the data must be clipped and
- // sanitized since it may come from an untrusted source such as NaCl.
- audio_bus_->ToInterleaved(
- frames_filled, format_.mBitsPerChannel / 8, audio_data);
- uint32 filled = frames_filled * format_.mBytesPerFrame;
-
- // Perform in-place, software-volume adjustments.
- media::AdjustVolume(audio_data,
- filled,
- audio_bus_->channels(),
- format_.mBitsPerChannel / 8,
- volume_);
-
- return noErr;
-}
-
-// DefaultOutputUnit callback
-OSStatus AUAudioOutputStream::InputProc(void* user_data,
- AudioUnitRenderActionFlags*,
- const AudioTimeStamp* output_time_stamp,
- UInt32,
- UInt32 number_of_frames,
- AudioBufferList* io_data) {
- AUAudioOutputStream* audio_output =
- static_cast<AUAudioOutputStream*>(user_data);
- if (!audio_output)
- return -1;
-
- return audio_output->Render(number_of_frames, io_data, output_time_stamp);
-}
-
-int AUAudioOutputStream::HardwareSampleRate() {
- // Determine the default output device's sample-rate.
- AudioDeviceID device_id = kAudioObjectUnknown;
- UInt32 info_size = sizeof(device_id);
- OSStatus result = AudioObjectGetPropertyData(kAudioObjectSystemObject,
- &kDefaultOutputDeviceAddress,
- 0,
- 0,
- &info_size,
- &device_id);
- if (result != noErr || device_id == kAudioObjectUnknown) {
- OSSTATUS_DLOG(WARNING, result)
- << "Could not get default audio output device.";
- return 0;
- }
-
- Float64 nominal_sample_rate;
- info_size = sizeof(nominal_sample_rate);
-
- AudioObjectPropertyAddress nominal_sample_rate_address = {
- kAudioDevicePropertyNominalSampleRate,
- kAudioObjectPropertyScopeGlobal,
- kAudioObjectPropertyElementMaster
- };
- result = AudioObjectGetPropertyData(device_id,
- &nominal_sample_rate_address,
- 0,
- 0,
- &info_size,
- &nominal_sample_rate);
- if (result != noErr) {
- OSSTATUS_DLOG(WARNING, result)
- << "Could not get default sample rate for device: " << device_id;
- return 0;
- }
-
- return static_cast<int>(nominal_sample_rate);
-}
-
-double AUAudioOutputStream::GetHardwareLatency() {
- if (!output_unit_ || output_device_id_ == kAudioObjectUnknown) {
- DLOG(WARNING) << "Audio unit object is NULL or device ID is unknown";
- return 0.0;
- }
-
- // Get audio unit latency.
- Float64 audio_unit_latency_sec = 0.0;
- UInt32 size = sizeof(audio_unit_latency_sec);
- OSStatus result = AudioUnitGetProperty(output_unit_,
- kAudioUnitProperty_Latency,
- kAudioUnitScope_Global,
- 0,
- &audio_unit_latency_sec,
- &size);
- if (result != noErr) {
- OSSTATUS_DLOG(WARNING, result) << "Could not get audio unit latency";
- return 0.0;
- }
-
- // Get output audio device latency.
- AudioObjectPropertyAddress property_address = {
- kAudioDevicePropertyLatency,
- kAudioDevicePropertyScopeOutput,
- kAudioObjectPropertyElementMaster
- };
- UInt32 device_latency_frames = 0;
- size = sizeof(device_latency_frames);
- result = AudioObjectGetPropertyData(output_device_id_,
- &property_address,
- 0,
- NULL,
- &size,
- &device_latency_frames);
- if (result != noErr) {
- OSSTATUS_DLOG(WARNING, result) << "Could not get audio device latency";
- return 0.0;
- }
-
- return static_cast<double>((audio_unit_latency_sec *
- format_.mSampleRate) + device_latency_frames);
-}
-
-double AUAudioOutputStream::GetPlayoutLatency(
- const AudioTimeStamp* output_time_stamp) {
- // Ensure mHostTime is valid.
- if ((output_time_stamp->mFlags & kAudioTimeStampHostTimeValid) == 0)
- return 0;
-
- // Get the delay between the moment getting the callback and the scheduled
- // time stamp that tells when the data is going to be played out.
- UInt64 output_time_ns = AudioConvertHostTimeToNanos(
- output_time_stamp->mHostTime);
- UInt64 now_ns = AudioConvertHostTimeToNanos(AudioGetCurrentHostTime());
-
- // Prevent overflow leading to huge delay information; occurs regularly on
- // the bots, probably less so in the wild.
- if (now_ns > output_time_ns)
- return 0;
-
- double delay_frames = static_cast<double>
- (1e-9 * (output_time_ns - now_ns) * format_.mSampleRate);
-
- return (delay_frames + hardware_latency_frames_);
-}
-
-} // namespace media
diff --git a/src/media/audio/mac/audio_low_latency_output_mac.h b/src/media/audio/mac/audio_low_latency_output_mac.h
deleted file mode 100644
index 4ceb4af..0000000
--- a/src/media/audio/mac/audio_low_latency_output_mac.h
+++ /dev/null
@@ -1,110 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-//
-// Implementation notes:
-//
-// - It is recommended to first acquire the native sample rate of the default
-// output device and then use the same rate when creating this object.
-// Use AUAudioOutputStream::HardwareSampleRate() to retrieve the sample rate.
-// - Calling Close() also leads to self destruction.
-// - The latency consists of two parts:
- // 1) Hardware latency, which includes Audio Unit latency and audio device
-// latency;
-// 2) The delay between the moment getting the callback and the scheduled time
-// stamp that tells when the data is going to be played out.
-//
-#ifndef MEDIA_AUDIO_MAC_AUDIO_LOW_LATENCY_OUTPUT_MAC_H_
-#define MEDIA_AUDIO_MAC_AUDIO_LOW_LATENCY_OUTPUT_MAC_H_
-
-#include <AudioUnit/AudioUnit.h>
-#include <CoreAudio/CoreAudio.h>
-
-#include "base/compiler_specific.h"
-#include "media/audio/audio_io.h"
-#include "media/audio/audio_parameters.h"
-
-namespace media {
-
-class AudioManagerMac;
-
- // Implementation of AudioOutputStream for Mac OS X using the
-// default output Audio Unit present in OS 10.4 and later.
-// The default output Audio Unit is for low-latency audio I/O.
-class AUAudioOutputStream : public AudioOutputStream {
- public:
- // The ctor takes all the usual parameters, plus |manager| which is the
- // audio manager that is creating this object.
- AUAudioOutputStream(AudioManagerMac* manager,
- const AudioParameters& params);
- // The dtor is typically called by the AudioManager only and it is usually
- // triggered by calling AudioOutputStream::Close().
- virtual ~AUAudioOutputStream();
-
- // Implementation of AudioOutputStream.
- virtual bool Open() OVERRIDE;
- virtual void Close() OVERRIDE;
- virtual void Start(AudioSourceCallback* callback) OVERRIDE;
- virtual void Stop() OVERRIDE;
- virtual void SetVolume(double volume) OVERRIDE;
- virtual void GetVolume(double* volume) OVERRIDE;
-
- static int HardwareSampleRate();
-
- private:
- // DefaultOutputUnit callback.
- static OSStatus InputProc(void* user_data,
- AudioUnitRenderActionFlags* flags,
- const AudioTimeStamp* time_stamp,
- UInt32 bus_number,
- UInt32 number_of_frames,
- AudioBufferList* io_data);
-
- OSStatus Render(UInt32 number_of_frames, AudioBufferList* io_data,
- const AudioTimeStamp* output_time_stamp);
-
- // Sets up the stream format for the default output Audio Unit.
- bool Configure();
-
- // Gets the fixed playout device hardware latency and stores it. Returns 0
- // if not available.
- double GetHardwareLatency();
-
- // Gets the current playout latency value.
- double GetPlayoutLatency(const AudioTimeStamp* output_time_stamp);
-
- // Our creator, the audio manager needs to be notified when we close.
- AudioManagerMac* manager_;
-
- size_t number_of_frames_;
-
- // Pointer to the object that will provide the audio samples.
- AudioSourceCallback* source_;
-
- // Structure that holds the stream format details such as bitrate.
- AudioStreamBasicDescription format_;
-
- // The default output Audio Unit which talks to the audio hardware.
- AudioUnit output_unit_;
-
- // The UID refers to the current output audio device.
- AudioDeviceID output_device_id_;
-
- // Volume level from 0 to 1.
- float volume_;
-
- // Fixed playout hardware latency in frames.
- double hardware_latency_frames_;
-
- // The flag used to stop the streaming.
- bool stopped_;
-
- // Container for retrieving data from AudioSourceCallback::OnMoreData().
- scoped_ptr<AudioBus> audio_bus_;
-
- DISALLOW_COPY_AND_ASSIGN(AUAudioOutputStream);
-};
-
-} // namespace media
-
-#endif // MEDIA_AUDIO_MAC_AUDIO_LOW_LATENCY_OUTPUT_MAC_H_
diff --git a/src/media/audio/mac/audio_manager_mac.cc b/src/media/audio/mac/audio_manager_mac.cc
deleted file mode 100644
index e0300ed..0000000
--- a/src/media/audio/mac/audio_manager_mac.cc
+++ /dev/null
@@ -1,406 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "media/audio/mac/audio_manager_mac.h"
-
-#include <CoreAudio/AudioHardware.h>
-#include <string>
-
-#include "base/bind.h"
-#include "base/command_line.h"
-#include "base/mac/mac_logging.h"
-#include "base/mac/scoped_cftyperef.h"
-#include "base/sys_string_conversions.h"
-#include "media/audio/audio_util.h"
-#include "media/audio/mac/audio_input_mac.h"
-#include "media/audio/mac/audio_low_latency_input_mac.h"
-#include "media/audio/mac/audio_low_latency_output_mac.h"
-#include "media/audio/mac/audio_output_mac.h"
-#include "media/audio/mac/audio_synchronized_mac.h"
-#include "media/audio/mac/audio_unified_mac.h"
-#include "media/base/bind_to_loop.h"
-#include "media/base/limits.h"
-#include "media/base/media_switches.h"
-
-namespace media {
-
-// Maximum number of output streams that can be open simultaneously.
-static const int kMaxOutputStreams = 50;
-
-static bool HasAudioHardware(AudioObjectPropertySelector selector) {
- AudioDeviceID output_device_id = kAudioObjectUnknown;
- const AudioObjectPropertyAddress property_address = {
- selector,
- kAudioObjectPropertyScopeGlobal, // mScope
- kAudioObjectPropertyElementMaster // mElement
- };
- UInt32 output_device_id_size = static_cast<UInt32>(sizeof(output_device_id));
- OSStatus err = AudioObjectGetPropertyData(kAudioObjectSystemObject,
- &property_address,
- 0, // inQualifierDataSize
- NULL, // inQualifierData
- &output_device_id_size,
- &output_device_id);
- return err == kAudioHardwareNoError &&
- output_device_id != kAudioObjectUnknown;
-}
-
-// Returns true if the default input device is the same as
-// the default output device.
-static bool HasUnifiedDefaultIO() {
- AudioDeviceID input_id, output_id;
-
- AudioObjectPropertyAddress pa;
- pa.mSelector = kAudioHardwarePropertyDefaultInputDevice;
- pa.mScope = kAudioObjectPropertyScopeGlobal;
- pa.mElement = kAudioObjectPropertyElementMaster;
- UInt32 size = sizeof(input_id);
-
- // Get the default input.
- OSStatus result = AudioObjectGetPropertyData(
- kAudioObjectSystemObject,
- &pa,
- 0,
- 0,
- &size,
- &input_id);
-
- if (result != noErr)
- return false;
-
- // Get the default output.
- pa.mSelector = kAudioHardwarePropertyDefaultOutputDevice;
- result = AudioObjectGetPropertyData(
- kAudioObjectSystemObject,
- &pa,
- 0,
- 0,
- &size,
- &output_id);
-
- if (result != noErr)
- return false;
-
- return input_id == output_id;
-}
-
-static void GetAudioDeviceInfo(bool is_input,
- media::AudioDeviceNames* device_names) {
- DCHECK(device_names);
- device_names->clear();
-
- // Query the number of total devices.
- AudioObjectPropertyAddress property_address = {
- kAudioHardwarePropertyDevices,
- kAudioObjectPropertyScopeGlobal,
- kAudioObjectPropertyElementMaster
- };
- UInt32 size = 0;
- OSStatus result = AudioObjectGetPropertyDataSize(kAudioObjectSystemObject,
- &property_address,
- 0,
- NULL,
- &size);
- if (result || !size)
- return;
-
- int device_count = size / sizeof(AudioDeviceID);
-
- // Get the array of device ids for all the devices, which includes both
- // input devices and output devices.
- scoped_ptr_malloc<AudioDeviceID>
- devices(reinterpret_cast<AudioDeviceID*>(malloc(size)));
- AudioDeviceID* device_ids = devices.get();
- result = AudioObjectGetPropertyData(kAudioObjectSystemObject,
- &property_address,
- 0,
- NULL,
- &size,
- device_ids);
- if (result)
- return;
-
- // Iterate over all available devices to gather information.
- for (int i = 0; i < device_count; ++i) {
- // Get the number of input or output channels of the device.
- property_address.mScope = is_input ?
- kAudioDevicePropertyScopeInput : kAudioDevicePropertyScopeOutput;
- property_address.mSelector = kAudioDevicePropertyStreams;
- size = 0;
- result = AudioObjectGetPropertyDataSize(device_ids[i],
- &property_address,
- 0,
- NULL,
- &size);
- if (result || !size)
- continue;
-
- // Get device UID.
- CFStringRef uid = NULL;
- size = sizeof(uid);
- property_address.mSelector = kAudioDevicePropertyDeviceUID;
- property_address.mScope = kAudioObjectPropertyScopeGlobal;
- result = AudioObjectGetPropertyData(device_ids[i],
- &property_address,
- 0,
- NULL,
- &size,
- &uid);
- if (result)
- continue;
-
- // Get device name.
- CFStringRef name = NULL;
- property_address.mSelector = kAudioObjectPropertyName;
- property_address.mScope = kAudioObjectPropertyScopeGlobal;
- result = AudioObjectGetPropertyData(device_ids[i],
- &property_address,
- 0,
- NULL,
- &size,
- &name);
- if (result) {
- if (uid)
- CFRelease(uid);
- continue;
- }
-
- // Store the device name and UID.
- media::AudioDeviceName device_name;
- device_name.device_name = base::SysCFStringRefToUTF8(name);
- device_name.unique_id = base::SysCFStringRefToUTF8(uid);
- device_names->push_back(device_name);
-
- // We are responsible for releasing the returned CFObject. See the
- // comment in the AudioHardware.h for constant
- // kAudioDevicePropertyDeviceUID.
- if (uid)
- CFRelease(uid);
- if (name)
- CFRelease(name);
- }
-}
-
-static AudioDeviceID GetAudioDeviceIdByUId(bool is_input,
- const std::string& device_id) {
- AudioObjectPropertyAddress property_address = {
- kAudioHardwarePropertyDevices,
- kAudioObjectPropertyScopeGlobal,
- kAudioObjectPropertyElementMaster
- };
- AudioDeviceID audio_device_id = kAudioObjectUnknown;
- UInt32 device_size = sizeof(audio_device_id);
- OSStatus result = -1;
-
- if (device_id == AudioManagerBase::kDefaultDeviceId) {
- // Default Device.
- property_address.mSelector = is_input ?
- kAudioHardwarePropertyDefaultInputDevice :
- kAudioHardwarePropertyDefaultOutputDevice;
-
- result = AudioObjectGetPropertyData(kAudioObjectSystemObject,
- &property_address,
- 0,
- 0,
- &device_size,
- &audio_device_id);
- } else {
- // Non-default device.
- base::mac::ScopedCFTypeRef<CFStringRef>
- uid(base::SysUTF8ToCFStringRef(device_id));
- AudioValueTranslation value;
- value.mInputData = &uid;
- value.mInputDataSize = sizeof(CFStringRef);
- value.mOutputData = &audio_device_id;
- value.mOutputDataSize = device_size;
- UInt32 translation_size = sizeof(AudioValueTranslation);
-
- property_address.mSelector = kAudioHardwarePropertyDeviceForUID;
- result = AudioObjectGetPropertyData(kAudioObjectSystemObject,
- &property_address,
- 0,
- 0,
- &translation_size,
- &value);
- }
-
- if (result) {
- OSSTATUS_DLOG(WARNING, result) << "Unable to query device " << device_id
- << " for AudioDeviceID";
- }
-
- return audio_device_id;
-}
-
-// Property address to monitor for device changes.
-static const AudioObjectPropertyAddress kDeviceChangePropertyAddress = {
- kAudioHardwarePropertyDefaultOutputDevice,
- kAudioObjectPropertyScopeGlobal,
- kAudioObjectPropertyElementMaster
-};
-
-// Callback from the system when the default device changes; this must be called
-// on the MessageLoop that created the AudioManager.
-static OSStatus OnDefaultDeviceChangedCallback(
- AudioObjectID object,
- UInt32 num_addresses,
- const AudioObjectPropertyAddress addresses[],
- void* context) {
- if (object != kAudioObjectSystemObject)
- return noErr;
-
- for (UInt32 i = 0; i < num_addresses; ++i) {
- if (addresses[i].mSelector == kDeviceChangePropertyAddress.mSelector &&
- addresses[i].mScope == kDeviceChangePropertyAddress.mScope &&
- addresses[i].mElement == kDeviceChangePropertyAddress.mElement &&
- context) {
- static_cast<AudioManagerMac*>(context)->OnDeviceChange();
- break;
- }
- }
-
- return noErr;
-}
-
-AudioManagerMac::AudioManagerMac()
- : listener_registered_(false),
- creating_message_loop_(base::MessageLoopProxy::current()) {
- SetMaxOutputStreamsAllowed(kMaxOutputStreams);
-
-  // AudioManagerMac is expected to be created by the root platform thread,
-  // which is generally BrowserMainLoop; its MessageLoop will drive the
-  // NSApplication pump, which in turn fires the property listener callbacks.
- if (!creating_message_loop_)
- return;
-
- OSStatus result = AudioObjectAddPropertyListener(
- kAudioObjectSystemObject,
- &kDeviceChangePropertyAddress,
- &OnDefaultDeviceChangedCallback,
- this);
-
- if (result != noErr) {
- OSSTATUS_DLOG(ERROR, result) << "AudioObjectAddPropertyListener() failed!";
- return;
- }
-
- listener_registered_ = true;
-}
-
-AudioManagerMac::~AudioManagerMac() {
- if (listener_registered_) {
- // TODO(dalecurtis): CHECK destruction happens on |creating_message_loop_|,
- // should be true, but currently several unit tests perform destruction in
- // odd places so we can't CHECK here currently.
- OSStatus result = AudioObjectRemovePropertyListener(
- kAudioObjectSystemObject,
- &kDeviceChangePropertyAddress,
- &OnDefaultDeviceChangedCallback,
- this);
- OSSTATUS_DLOG_IF(ERROR, result != noErr, result)
- << "AudioObjectRemovePropertyListener() failed!";
- }
-
- Shutdown();
-}
-
-bool AudioManagerMac::HasAudioOutputDevices() {
- return HasAudioHardware(kAudioHardwarePropertyDefaultOutputDevice);
-}
-
-bool AudioManagerMac::HasAudioInputDevices() {
- return HasAudioHardware(kAudioHardwarePropertyDefaultInputDevice);
-}
-
-void AudioManagerMac::GetAudioInputDeviceNames(
- media::AudioDeviceNames* device_names) {
- GetAudioDeviceInfo(true, device_names);
- if (!device_names->empty()) {
- // Prepend the default device to the list since we always want it to be
- // on the top of the list for all platforms. There is no duplicate
- // counting here since the default device has been abstracted out before.
- media::AudioDeviceName name;
- name.device_name = AudioManagerBase::kDefaultDeviceName;
- name.unique_id = AudioManagerBase::kDefaultDeviceId;
- device_names->push_front(name);
- }
-}
-
-AudioOutputStream* AudioManagerMac::MakeLinearOutputStream(
- const AudioParameters& params) {
- DCHECK_EQ(AudioParameters::AUDIO_PCM_LINEAR, params.format());
- return new PCMQueueOutAudioOutputStream(this, params);
-}
-
-AudioOutputStream* AudioManagerMac::MakeLowLatencyOutputStream(
- const AudioParameters& params) {
- DCHECK_EQ(AudioParameters::AUDIO_PCM_LOW_LATENCY, params.format());
-
- // TODO(crogers): remove once we properly handle input device selection.
- if (CommandLine::ForCurrentProcess()->HasSwitch(
- switches::kEnableWebAudioInput)) {
- if (HasUnifiedDefaultIO())
- return new AudioHardwareUnifiedStream(this, params);
-
- // kAudioDeviceUnknown translates to "use default" here.
- return new AudioSynchronizedStream(this,
- params,
- kAudioDeviceUnknown,
- kAudioDeviceUnknown);
- }
-
- return new AUAudioOutputStream(this, params);
-}
-
-AudioInputStream* AudioManagerMac::MakeLinearInputStream(
- const AudioParameters& params, const std::string& device_id) {
- DCHECK_EQ(AudioParameters::AUDIO_PCM_LINEAR, params.format());
- return new PCMQueueInAudioInputStream(this, params);
-}
-
-AudioInputStream* AudioManagerMac::MakeLowLatencyInputStream(
- const AudioParameters& params, const std::string& device_id) {
- DCHECK_EQ(AudioParameters::AUDIO_PCM_LOW_LATENCY, params.format());
-  // Get the AudioDeviceID that refers to the audio device with the given
-  // unique id. It is used to set the device for the Audio Unit.
- AudioDeviceID audio_device_id = GetAudioDeviceIdByUId(true, device_id);
- AudioInputStream* stream = NULL;
- if (audio_device_id != kAudioObjectUnknown)
- stream = new AUAudioInputStream(this, params, audio_device_id);
-
- return stream;
-}
-
-AudioParameters AudioManagerMac::GetPreferredLowLatencyOutputStreamParameters(
- const AudioParameters& input_params) {
- if (CommandLine::ForCurrentProcess()->HasSwitch(
- switches::kEnableWebAudioInput)) {
- // TODO(crogers): given the limitations of the AudioOutputStream
- // back-ends used with kEnableWebAudioInput, we hard-code to stereo.
- // Specifically, this is a limitation of AudioSynchronizedStream which
- // can be removed as part of the work to consolidate these back-ends.
- return AudioParameters(
- AudioParameters::AUDIO_PCM_LOW_LATENCY, CHANNEL_LAYOUT_STEREO,
- GetAudioHardwareSampleRate(), 16, GetAudioHardwareBufferSize());
- }
-
- return AudioManagerBase::GetPreferredLowLatencyOutputStreamParameters(
- input_params);
-}
-
-void AudioManagerMac::OnDeviceChange() {
- // Post the task to the |creating_message_loop_| to execute our listener
-  // callback. The callback is created with BindToLoop(), so it will hop over
- // to the audio thread upon execution.
- creating_message_loop_->PostTask(FROM_HERE, BindToLoop(
- GetMessageLoop(), base::Bind(
- &AudioManagerMac::NotifyAllOutputDeviceChangeListeners,
- base::Unretained(this))));
-}
-
-AudioManager* CreateAudioManager() {
- return new AudioManagerMac();
-}
-
-} // namespace media
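The deleted GetAudioDeviceInfo() and GetAudioDeviceIdByUId() helpers above both follow CoreAudio's two-step property protocol: query the payload size with AudioObjectGetPropertyDataSize(), allocate a buffer of that size, then fetch the data with AudioObjectGetPropertyData(). A minimal standalone sketch of that pattern (not taken from the removed file; the helper name is made up for illustration and assumes macOS with the CoreAudio framework):

#include <CoreAudio/CoreAudio.h>
#include <vector>

// Enumerate all audio device IDs (inputs and outputs) on the system.
static std::vector<AudioDeviceID> ListAudioDeviceIds() {
  AudioObjectPropertyAddress address = {
    kAudioHardwarePropertyDevices,
    kAudioObjectPropertyScopeGlobal,
    kAudioObjectPropertyElementMaster
  };
  // Step 1: ask how many bytes the property occupies.
  UInt32 size = 0;
  if (AudioObjectGetPropertyDataSize(kAudioObjectSystemObject, &address,
                                     0, NULL, &size) != noErr) {
    return std::vector<AudioDeviceID>();
  }
  // Step 2: fetch the property into a buffer of exactly that size.
  std::vector<AudioDeviceID> ids(size / sizeof(AudioDeviceID));
  if (ids.empty() ||
      AudioObjectGetPropertyData(kAudioObjectSystemObject, &address,
                                 0, NULL, &size, &ids[0]) != noErr) {
    ids.clear();
  }
  return ids;
}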
diff --git a/src/media/audio/mac/audio_manager_mac.h b/src/media/audio/mac/audio_manager_mac.h
deleted file mode 100644
index d8b6b2d..0000000
--- a/src/media/audio/mac/audio_manager_mac.h
+++ /dev/null
@@ -1,56 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef MEDIA_AUDIO_MAC_AUDIO_MANAGER_MAC_H_
-#define MEDIA_AUDIO_MAC_AUDIO_MANAGER_MAC_H_
-
-#include "base/basictypes.h"
-#include "base/compiler_specific.h"
-#include "base/message_loop_proxy.h"
-#include "media/audio/audio_manager_base.h"
-
-namespace media {
-
-// Mac OS X implementation of the AudioManager singleton. This class is internal
-// to the audio output and only internal users can call methods not exposed by
-// the AudioManager class.
-class MEDIA_EXPORT AudioManagerMac : public AudioManagerBase {
- public:
- AudioManagerMac();
-
- // Implementation of AudioManager.
- virtual bool HasAudioOutputDevices() OVERRIDE;
- virtual bool HasAudioInputDevices() OVERRIDE;
- virtual void GetAudioInputDeviceNames(media::AudioDeviceNames* device_names)
- OVERRIDE;
-
- // Implementation of AudioManagerBase.
- virtual AudioOutputStream* MakeLinearOutputStream(
- const AudioParameters& params) OVERRIDE;
- virtual AudioOutputStream* MakeLowLatencyOutputStream(
- const AudioParameters& params) OVERRIDE;
- virtual AudioInputStream* MakeLinearInputStream(
- const AudioParameters& params, const std::string& device_id) OVERRIDE;
- virtual AudioInputStream* MakeLowLatencyInputStream(
- const AudioParameters& params, const std::string& device_id) OVERRIDE;
- virtual AudioParameters GetPreferredLowLatencyOutputStreamParameters(
- const AudioParameters& input_params) OVERRIDE;
-
- // Called by an internal device change listener. Must be called on
- // |creating_message_loop_|.
- void OnDeviceChange();
-
- protected:
- virtual ~AudioManagerMac();
-
- private:
- bool listener_registered_;
- scoped_refptr<base::MessageLoopProxy> creating_message_loop_;
-
- DISALLOW_COPY_AND_ASSIGN(AudioManagerMac);
-};
-
-} // namespace media
-
-#endif // MEDIA_AUDIO_MAC_AUDIO_MANAGER_MAC_H_
diff --git a/src/media/audio/mac/audio_output_mac.cc b/src/media/audio/mac/audio_output_mac.cc
deleted file mode 100644
index d0e0afa..0000000
--- a/src/media/audio/mac/audio_output_mac.cc
+++ /dev/null
@@ -1,553 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "media/audio/mac/audio_output_mac.h"
-
-#include <CoreServices/CoreServices.h>
-
-#include "base/basictypes.h"
-#include "base/debug/trace_event.h"
-#include "base/logging.h"
-#include "base/memory/scoped_ptr.h"
-#include "media/audio/audio_util.h"
-#include "media/audio/mac/audio_manager_mac.h"
-#include "media/base/channel_mixer.h"
-
-namespace media {
-
-// A custom data structure to store information about an AudioQueue buffer.
-struct AudioQueueUserData {
- AudioQueueUserData() : empty_buffer(false) {}
- bool empty_buffer;
-};
-
-// Overview of operation:
-// 1) An object of PCMQueueOutAudioOutputStream is created by the AudioManager
-// factory: audio_man->MakeAudioStream(). This just fills some structure.
-// 2) Next some thread will call Open(), at that point the underlying OS
-// queue is created and the audio buffers allocated.
-// 3) Then some thread will call Start(source). At this point the source will
-// called to fill the initial buffers in the context of that same thread.
-// Then the OS queue is started which will create its own thread which
-// periodically will call the source for more data as buffers are being
-// consumed.
-// 4) At some point some thread will call Stop(), which we handle by directly
-//    stopping the OS queue.
-// 5) One more callback to the source could be delivered in the context of
-//    the queue's own thread. Data, if any, will be discarded.
-// 6) The same thread that called stop will call Close(), where we clean up
-//    and notify the audio manager, which likely will destroy this object.
-
-PCMQueueOutAudioOutputStream::PCMQueueOutAudioOutputStream(
- AudioManagerMac* manager, const AudioParameters& params)
- : audio_queue_(NULL),
- source_(NULL),
- manager_(manager),
- packet_size_(params.GetBytesPerBuffer()),
- silence_bytes_(0),
- volume_(1),
- pending_bytes_(0),
- num_source_channels_(params.channels()),
- source_layout_(params.channel_layout()),
- num_core_channels_(0),
- should_swizzle_(false),
- stopped_event_(true /* manual reset */, false /* initial state */),
- num_buffers_left_(kNumBuffers),
- audio_bus_(AudioBus::Create(params)) {
- // We must have a manager.
- DCHECK(manager_);
- // A frame is one sample across all channels. In interleaved audio the per
- // frame fields identify the set of n |channels|. In uncompressed audio, a
- // packet is always one frame.
- format_.mSampleRate = params.sample_rate();
- format_.mFormatID = kAudioFormatLinearPCM;
- format_.mFormatFlags = kLinearPCMFormatFlagIsPacked;
- format_.mBitsPerChannel = params.bits_per_sample();
- format_.mChannelsPerFrame = params.channels();
- format_.mFramesPerPacket = 1;
- format_.mBytesPerPacket = (format_.mBitsPerChannel * params.channels()) / 8;
- format_.mBytesPerFrame = format_.mBytesPerPacket;
- format_.mReserved = 0;
-
- memset(buffer_, 0, sizeof(buffer_));
- memset(core_channel_orderings_, 0, sizeof(core_channel_orderings_));
- memset(channel_remap_, 0, sizeof(channel_remap_));
-
- if (params.bits_per_sample() > 8) {
- format_.mFormatFlags |= kLinearPCMFormatFlagIsSignedInteger;
- }
-
- // Silence buffer has a duration of 6ms to simulate the behavior of Windows.
-  // This value was chosen by experiment, and Macs cannot keep up with
- // anything less than 6ms.
- silence_bytes_ = format_.mBytesPerFrame * params.sample_rate() * 6 / 1000;
-}
-
-PCMQueueOutAudioOutputStream::~PCMQueueOutAudioOutputStream() {
-}
-
-void PCMQueueOutAudioOutputStream::HandleError(OSStatus err) {
- // source_ can be set to NULL from another thread. We need to cache its
-  // pointer while we operate here. Note that this does not mean that the source
- // has been destroyed.
- AudioSourceCallback* source = GetSource();
- if (source)
- source->OnError(this, static_cast<int>(err));
- LOG(ERROR) << "error " << GetMacOSStatusErrorString(err)
- << " (" << err << ")";
-}
-
-bool PCMQueueOutAudioOutputStream::Open() {
- // Get the default device id.
- AudioObjectID device_id = 0;
- AudioObjectPropertyAddress property_address = {
- kAudioHardwarePropertyDefaultOutputDevice,
- kAudioObjectPropertyScopeGlobal,
- kAudioObjectPropertyElementMaster
- };
- UInt32 device_id_size = sizeof(device_id);
- OSStatus err = AudioObjectGetPropertyData(kAudioObjectSystemObject,
- &property_address, 0, NULL,
- &device_id_size, &device_id);
- if (err != noErr) {
- HandleError(err);
- return false;
- }
- // Get the size of the channel layout.
- UInt32 core_layout_size;
- property_address.mSelector = kAudioDevicePropertyPreferredChannelLayout;
- property_address.mScope = kAudioDevicePropertyScopeOutput;
- err = AudioObjectGetPropertyDataSize(device_id, &property_address, 0, NULL,
- &core_layout_size);
- if (err != noErr) {
- HandleError(err);
- return false;
- }
-  // Get the device's channel layout. This layout may vary in size based on
- // the number of channels. Use |core_layout_size| to allocate memory.
- scoped_ptr_malloc<AudioChannelLayout> core_channel_layout;
- core_channel_layout.reset(
- reinterpret_cast<AudioChannelLayout*>(malloc(core_layout_size)));
- memset(core_channel_layout.get(), 0, core_layout_size);
- err = AudioObjectGetPropertyData(device_id, &property_address, 0, NULL,
- &core_layout_size,
- core_channel_layout.get());
- if (err != noErr) {
- HandleError(err);
- return false;
- }
-
- num_core_channels_ = std::min(
- static_cast<int>(CHANNELS_MAX),
- static_cast<int>(core_channel_layout->mNumberChannelDescriptions));
- if (num_core_channels_ == 2 &&
- ChannelLayoutToChannelCount(source_layout_) > 2) {
- channel_mixer_.reset(new ChannelMixer(
- source_layout_, CHANNEL_LAYOUT_STEREO));
- mixed_audio_bus_ = AudioBus::Create(
- num_core_channels_, audio_bus_->frames());
-
- format_.mChannelsPerFrame = num_core_channels_;
- format_.mBytesPerFrame = (format_.mBitsPerChannel >> 3) *
- format_.mChannelsPerFrame;
- format_.mBytesPerPacket = format_.mBytesPerFrame * format_.mFramesPerPacket;
- }
-
- // Create the actual queue object and let the OS use its own thread to
- // run its CFRunLoop.
- err = AudioQueueNewOutput(&format_, RenderCallback, this, NULL,
- kCFRunLoopCommonModes, 0, &audio_queue_);
- if (err != noErr) {
- HandleError(err);
- return false;
- }
- // Allocate the hardware-managed buffers.
- for (uint32 ix = 0; ix != kNumBuffers; ++ix) {
- err = AudioQueueAllocateBuffer(audio_queue_, packet_size_, &buffer_[ix]);
- if (err != noErr) {
- HandleError(err);
- return false;
- }
- // Allocate memory for user data.
- buffer_[ix]->mUserData = new AudioQueueUserData();
- }
- // Set initial volume here.
- err = AudioQueueSetParameter(audio_queue_, kAudioQueueParam_Volume, 1.0);
- if (err != noErr) {
- HandleError(err);
- return false;
- }
-
- // Capture channel layout in a format we can use.
- for (int i = 0; i < CHANNELS_MAX; ++i)
- core_channel_orderings_[i] = kEmptyChannel;
-
- bool all_channels_unknown = true;
- for (int i = 0; i < num_core_channels_; ++i) {
- AudioChannelLabel label =
- core_channel_layout->mChannelDescriptions[i].mChannelLabel;
- if (label == kAudioChannelLabel_Unknown) {
- continue;
- }
- all_channels_unknown = false;
- switch (label) {
- case kAudioChannelLabel_Left:
- core_channel_orderings_[LEFT] = i;
- channel_remap_[i] = ChannelOrder(source_layout_, LEFT);
- break;
- case kAudioChannelLabel_Right:
- core_channel_orderings_[RIGHT] = i;
- channel_remap_[i] = ChannelOrder(source_layout_, RIGHT);
- break;
- case kAudioChannelLabel_Center:
- core_channel_orderings_[CENTER] = i;
- channel_remap_[i] = ChannelOrder(source_layout_, CENTER);
- break;
- case kAudioChannelLabel_LFEScreen:
- core_channel_orderings_[LFE] = i;
- channel_remap_[i] = ChannelOrder(source_layout_, LFE);
- break;
- case kAudioChannelLabel_LeftSurround:
- core_channel_orderings_[SIDE_LEFT] = i;
- channel_remap_[i] = ChannelOrder(source_layout_, SIDE_LEFT);
- break;
- case kAudioChannelLabel_RightSurround:
- core_channel_orderings_[SIDE_RIGHT] = i;
- channel_remap_[i] = ChannelOrder(source_layout_, SIDE_RIGHT);
- break;
- case kAudioChannelLabel_LeftCenter:
- core_channel_orderings_[LEFT_OF_CENTER] = i;
- channel_remap_[i] = ChannelOrder(source_layout_, LEFT_OF_CENTER);
- break;
- case kAudioChannelLabel_RightCenter:
- core_channel_orderings_[RIGHT_OF_CENTER] = i;
- channel_remap_[i] = ChannelOrder(source_layout_, RIGHT_OF_CENTER);
- break;
- case kAudioChannelLabel_CenterSurround:
- core_channel_orderings_[BACK_CENTER] = i;
- channel_remap_[i] = ChannelOrder(source_layout_, BACK_CENTER);
- break;
- case kAudioChannelLabel_RearSurroundLeft:
- core_channel_orderings_[BACK_LEFT] = i;
- channel_remap_[i] = ChannelOrder(source_layout_, BACK_LEFT);
- break;
- case kAudioChannelLabel_RearSurroundRight:
- core_channel_orderings_[BACK_RIGHT] = i;
- channel_remap_[i] = ChannelOrder(source_layout_, BACK_RIGHT);
- break;
- default:
- DLOG(WARNING) << "Channel label not supported";
- channel_remap_[i] = kEmptyChannel;
- break;
- }
- }
-
- if (all_channels_unknown) {
- return true;
- }
-
- // Check if we need to adjust the layout.
- // If the device has a BACK_LEFT and no SIDE_LEFT and the source has
- // a SIDE_LEFT but no BACK_LEFT, then move (and preserve the channel).
- // e.g. CHANNEL_LAYOUT_5POINT1 -> CHANNEL_LAYOUT_5POINT1_BACK
- CheckForAdjustedLayout(SIDE_LEFT, BACK_LEFT);
- // Same for SIDE_RIGHT -> BACK_RIGHT.
- CheckForAdjustedLayout(SIDE_RIGHT, BACK_RIGHT);
- // Move BACK_LEFT to SIDE_LEFT.
- // e.g. CHANNEL_LAYOUT_5POINT1_BACK -> CHANNEL_LAYOUT_5POINT1
- CheckForAdjustedLayout(BACK_LEFT, SIDE_LEFT);
- // Same for BACK_RIGHT -> SIDE_RIGHT.
- CheckForAdjustedLayout(BACK_RIGHT, SIDE_RIGHT);
- // Move SIDE_LEFT to LEFT_OF_CENTER.
- // e.g. CHANNEL_LAYOUT_7POINT1 -> CHANNEL_LAYOUT_7POINT1_WIDE
- CheckForAdjustedLayout(SIDE_LEFT, LEFT_OF_CENTER);
- // Same for SIDE_RIGHT -> RIGHT_OF_CENTER.
- CheckForAdjustedLayout(SIDE_RIGHT, RIGHT_OF_CENTER);
- // Move LEFT_OF_CENTER to SIDE_LEFT.
- // e.g. CHANNEL_LAYOUT_7POINT1_WIDE -> CHANNEL_LAYOUT_7POINT1
- CheckForAdjustedLayout(LEFT_OF_CENTER, SIDE_LEFT);
- // Same for RIGHT_OF_CENTER -> SIDE_RIGHT.
- CheckForAdjustedLayout(RIGHT_OF_CENTER, SIDE_RIGHT);
- // For MONO -> STEREO, move audio to LEFT and RIGHT if applicable.
- CheckForAdjustedLayout(CENTER, LEFT);
- CheckForAdjustedLayout(CENTER, RIGHT);
-
- // Check if we will need to swizzle from source to device layout (maybe not!).
- should_swizzle_ = false;
- for (int i = 0; i < num_core_channels_; ++i) {
- if (ChannelOrder(source_layout_, static_cast<Channels>(i)) !=
- core_channel_orderings_[i]) {
- should_swizzle_ = true;
- break;
- }
- }
-
- return true;
-}
-
-void PCMQueueOutAudioOutputStream::Close() {
- // It is valid to call Close() before calling Open(), thus audio_queue_
- // might be NULL.
- if (audio_queue_) {
- OSStatus err = 0;
- for (uint32 ix = 0; ix != kNumBuffers; ++ix) {
- if (buffer_[ix]) {
- // Free user data.
- delete static_cast<AudioQueueUserData*>(buffer_[ix]->mUserData);
- // Free AudioQueue buffer.
- err = AudioQueueFreeBuffer(audio_queue_, buffer_[ix]);
- if (err != noErr) {
- HandleError(err);
- break;
- }
- }
- }
- err = AudioQueueDispose(audio_queue_, true);
- if (err != noErr)
- HandleError(err);
- }
- // Inform the audio manager that we have been closed. This can cause our
- // destruction.
- manager_->ReleaseOutputStream(this);
-}
-
-void PCMQueueOutAudioOutputStream::Stop() {
- if (source_) {
- // We request a synchronous stop, so the next call can take some time. In
-    // the Windows implementation we block here as well.
- SetSource(NULL);
- stopped_event_.Wait();
- }
-}
-
-void PCMQueueOutAudioOutputStream::SetVolume(double volume) {
- if (!audio_queue_)
- return;
- volume_ = static_cast<float>(volume);
- OSStatus err = AudioQueueSetParameter(audio_queue_,
- kAudioQueueParam_Volume,
- volume);
- if (err != noErr) {
- HandleError(err);
- }
-}
-
-void PCMQueueOutAudioOutputStream::GetVolume(double* volume) {
- if (!audio_queue_)
- return;
- *volume = volume_;
-}
-
-template<class Format>
-void PCMQueueOutAudioOutputStream::SwizzleLayout(Format* b, uint32 filled) {
- Format src_format[num_source_channels_];
- int filled_channels = (num_core_channels_ < num_source_channels_) ?
- num_core_channels_ : num_source_channels_;
- for (uint32 i = 0; i < filled; i += sizeof(src_format),
- b += num_source_channels_) {
- // TODO(fbarchard): This could be further optimized with pshufb.
- memcpy(src_format, b, sizeof(src_format));
- for (int ch = 0; ch < filled_channels; ++ch) {
- if (channel_remap_[ch] != kEmptyChannel &&
- channel_remap_[ch] <= CHANNELS_MAX) {
- b[ch] = src_format[channel_remap_[ch]];
- } else {
- b[ch] = 0;
- }
- }
- }
-}
-
-bool PCMQueueOutAudioOutputStream::CheckForAdjustedLayout(
- Channels input_channel,
- Channels output_channel) {
- if (core_channel_orderings_[output_channel] > kEmptyChannel &&
- core_channel_orderings_[input_channel] == kEmptyChannel &&
- ChannelOrder(source_layout_, input_channel) > kEmptyChannel &&
- ChannelOrder(source_layout_, output_channel) == kEmptyChannel) {
- channel_remap_[core_channel_orderings_[output_channel]] =
- ChannelOrder(source_layout_, input_channel);
- return true;
- }
- return false;
-}
-
-// Note to future hackers of this function: Do not add locks to this function
-// that are held through any calls made back into AudioQueue APIs, or other
-// OS audio functions. This is because the OS dispatch may grab external
-// locks, or possibly re-enter this function which can lead to a deadlock.
-void PCMQueueOutAudioOutputStream::RenderCallback(void* p_this,
- AudioQueueRef queue,
- AudioQueueBufferRef buffer) {
- TRACE_EVENT0("audio", "PCMQueueOutAudioOutputStream::RenderCallback");
-
- PCMQueueOutAudioOutputStream* audio_stream =
- static_cast<PCMQueueOutAudioOutputStream*>(p_this);
-
- // Call the audio source to fill the free buffer with data. Not having a
- // source means that the queue has been stopped.
- AudioSourceCallback* source = audio_stream->GetSource();
- if (!source) {
-    // PCMQueueOutAudioOutputStream::Stop() is waiting for this callback to
-    // stop the stream and signal when all callbacks are done.
-    // (We could probably stop the stream there instead, but it is better to
-    // keep all the complex logic in one place; stopping latency is not very
-    // important if the audio stream is reused in the mixer rather than
-    // closed immediately.)
- --audio_stream->num_buffers_left_;
- if (audio_stream->num_buffers_left_ == kNumBuffers - 1) {
- // First buffer after stop requested, stop the queue.
- OSStatus err = AudioQueueStop(audio_stream->audio_queue_, true);
- if (err != noErr)
- audio_stream->HandleError(err);
- }
- if (audio_stream->num_buffers_left_ == 0) {
- // Now we finally saw all the buffers.
- // Signal that stopping is complete.
- // Should never touch audio_stream after signaling as it
- // can be deleted at any moment.
- audio_stream->stopped_event_.Signal();
- }
- return;
- }
-
- // Adjust the number of pending bytes by subtracting the amount played.
- if (!static_cast<AudioQueueUserData*>(buffer->mUserData)->empty_buffer)
- audio_stream->pending_bytes_ -= buffer->mAudioDataByteSize;
-
- uint32 capacity = buffer->mAudioDataBytesCapacity;
- AudioBus* audio_bus = audio_stream->audio_bus_.get();
- DCHECK_EQ(
- audio_bus->frames() * audio_stream->format_.mBytesPerFrame, capacity);
- // TODO(sergeyu): Specify correct hardware delay for AudioBuffersState.
- int frames_filled = source->OnMoreData(
- audio_bus, AudioBuffersState(audio_stream->pending_bytes_, 0));
- uint32 filled = frames_filled * audio_stream->format_.mBytesPerFrame;
-
- // TODO(dalecurtis): Channel downmixing, upmixing, should be done in mixer;
- // volume adjust should use SSE optimized vector_fmul() prior to interleave.
- AudioBus* output_bus = audio_bus;
- if (audio_stream->channel_mixer_) {
- output_bus = audio_stream->mixed_audio_bus_.get();
- audio_stream->channel_mixer_->Transform(audio_bus, output_bus);
- }
-
- // Note: If this ever changes to output raw float the data must be clipped
- // and sanitized since it may come from an untrusted source such as NaCl.
- output_bus->ToInterleaved(
- frames_filled, audio_stream->format_.mBitsPerChannel / 8,
- buffer->mAudioData);
-
- // In order to keep the callback running, we need to provide a positive amount
- // of data to the audio queue. To simulate the behavior of Windows, we write
- // a buffer of silence.
- if (!filled) {
- CHECK(audio_stream->silence_bytes_ <= static_cast<int>(capacity));
- filled = audio_stream->silence_bytes_;
-
- // Assume unsigned audio.
- int silence_value = 128;
- if (audio_stream->format_.mBitsPerChannel > 8) {
- // When bits per channel is greater than 8, audio is signed.
- silence_value = 0;
- }
-
- memset(buffer->mAudioData, silence_value, filled);
- static_cast<AudioQueueUserData*>(buffer->mUserData)->empty_buffer = true;
- } else if (filled > capacity) {
- // User probably overran our buffer.
- audio_stream->HandleError(0);
- return;
- } else {
- static_cast<AudioQueueUserData*>(buffer->mUserData)->empty_buffer = false;
- }
-
- if (audio_stream->should_swizzle_) {
- // Handle channel order for surround sound audio.
- if (audio_stream->format_.mBitsPerChannel == 8) {
- audio_stream->SwizzleLayout(reinterpret_cast<uint8*>(buffer->mAudioData),
- filled);
- } else if (audio_stream->format_.mBitsPerChannel == 16) {
- audio_stream->SwizzleLayout(reinterpret_cast<int16*>(buffer->mAudioData),
- filled);
- } else if (audio_stream->format_.mBitsPerChannel == 32) {
- audio_stream->SwizzleLayout(reinterpret_cast<int32*>(buffer->mAudioData),
- filled);
- }
- }
-
- buffer->mAudioDataByteSize = filled;
-
-  // Increment pending bytes by the amount filled into the audio buffer if
-  // this is not a silence buffer.
- if (!static_cast<AudioQueueUserData*>(buffer->mUserData)->empty_buffer)
- audio_stream->pending_bytes_ += filled;
- if (NULL == queue)
- return;
- // Queue the audio data to the audio driver.
- OSStatus err = AudioQueueEnqueueBuffer(queue, buffer, 0, NULL);
- if (err != noErr) {
- if (err == kAudioQueueErr_EnqueueDuringReset) {
- // This is the error you get if you try to enqueue a buffer and the
- // queue has been closed. Not really a problem if indeed the queue
- // has been closed. We recheck the value of source now to see if it has
- // indeed been closed.
- if (!audio_stream->GetSource())
- return;
- }
- audio_stream->HandleError(err);
- }
-}
-
-void PCMQueueOutAudioOutputStream::Start(AudioSourceCallback* callback) {
- DCHECK(callback);
- DLOG_IF(ERROR, !audio_queue_) << "Open() has not been called successfully";
- if (!audio_queue_)
- return;
-
- OSStatus err = noErr;
- SetSource(callback);
- pending_bytes_ = 0;
- stopped_event_.Reset();
- num_buffers_left_ = kNumBuffers;
- // Ask the source to pre-fill all our buffers before playing.
- for (uint32 ix = 0; ix != kNumBuffers; ++ix) {
- buffer_[ix]->mAudioDataByteSize = 0;
- // Caller waits for 1st packet to become available, but not for others,
- // so we wait for them here.
- if (ix != 0) {
- AudioSourceCallback* source = GetSource();
- if (source)
- source->WaitTillDataReady();
- }
- RenderCallback(this, NULL, buffer_[ix]);
- }
-
-  // Queue the buffers to the audio driver; sound starts now.
- for (uint32 ix = 0; ix != kNumBuffers; ++ix) {
- err = AudioQueueEnqueueBuffer(audio_queue_, buffer_[ix], 0, NULL);
- if (err != noErr) {
- HandleError(err);
- return;
- }
- }
- err = AudioQueueStart(audio_queue_, NULL);
- if (err != noErr) {
- HandleError(err);
- return;
- }
-}
-
-void PCMQueueOutAudioOutputStream::SetSource(AudioSourceCallback* source) {
- base::AutoLock lock(source_lock_);
- source_ = source;
-}
-
-AudioOutputStream::AudioSourceCallback*
-PCMQueueOutAudioOutputStream::GetSource() {
- base::AutoLock lock(source_lock_);
- return source_;
-}
-
-} // namespace media
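PCMQueueOutAudioOutputStream, removed above, drives playback through the stock AudioQueue lifecycle: AudioQueueNewOutput(), AudioQueueAllocateBuffer() for each of the double buffers, priming via the render callback, AudioQueueEnqueueBuffer(), AudioQueueStart(), and AudioQueueStop()/AudioQueueDispose() on teardown. Below is a minimal sketch of that call sequence, assuming 16-bit stereo and a callback that only writes silence; the names are illustrative and not from the removed code.

#include <AudioToolbox/AudioQueue.h>
#include <CoreFoundation/CoreFoundation.h>
#include <string.h>

// Fills each buffer with silence and re-enqueues it, mirroring the rule that
// the callback must always hand the queue a positive amount of data.
static void SilenceCallback(void* user_data, AudioQueueRef queue,
                            AudioQueueBufferRef buffer) {
  memset(buffer->mAudioData, 0, buffer->mAudioDataBytesCapacity);
  buffer->mAudioDataByteSize = buffer->mAudioDataBytesCapacity;
  AudioQueueEnqueueBuffer(queue, buffer, 0, NULL);
}

static bool PlaySilence() {
  AudioStreamBasicDescription format = {};
  format.mSampleRate = 44100;
  format.mFormatID = kAudioFormatLinearPCM;
  format.mFormatFlags = kLinearPCMFormatFlagIsSignedInteger |
                        kLinearPCMFormatFlagIsPacked;
  format.mBitsPerChannel = 16;
  format.mChannelsPerFrame = 2;
  format.mFramesPerPacket = 1;
  format.mBytesPerFrame = 4;
  format.mBytesPerPacket = 4;

  AudioQueueRef queue = NULL;
  if (AudioQueueNewOutput(&format, SilenceCallback, NULL, NULL,
                          kCFRunLoopCommonModes, 0, &queue) != noErr)
    return false;

  // Double-buffer, as the deleted stream does: prime both buffers before
  // starting so the queue always has data in flight.
  for (int i = 0; i < 2; ++i) {
    AudioQueueBufferRef buffer = NULL;
    if (AudioQueueAllocateBuffer(queue, 4096, &buffer) != noErr)
      return false;
    SilenceCallback(NULL, queue, buffer);
  }
  if (AudioQueueStart(queue, NULL) != noErr)
    return false;
  // Later: AudioQueueStop(queue, true); AudioQueueDispose(queue, true);
  return true;
}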
diff --git a/src/media/audio/mac/audio_output_mac.h b/src/media/audio/mac/audio_output_mac.h
deleted file mode 100644
index c4b12f2..0000000
--- a/src/media/audio/mac/audio_output_mac.h
+++ /dev/null
@@ -1,121 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef MEDIA_AUDIO_MAC_AUDIO_OUTPUT_MAC_H_
-#define MEDIA_AUDIO_MAC_AUDIO_OUTPUT_MAC_H_
-
-#include <AudioToolbox/AudioFormat.h>
-#include <AudioToolbox/AudioQueue.h>
-#include <AudioUnit/AudioUnit.h>
-#include <CoreAudio/CoreAudio.h>
-
-#include "base/compiler_specific.h"
-#include "base/synchronization/lock.h"
-#include "base/synchronization/waitable_event.h"
-#include "media/audio/audio_io.h"
-#include "media/audio/audio_parameters.h"
-
-namespace media {
-
-class AudioManagerMac;
-class ChannelMixer;
-
-// Implementation of AudioOutputStream for Mac OS X using the audio queue
-// service present in OS 10.5 and later. AudioQueue is the successor to the
-// SoundManager services and, unlike them, is supported in 64-bit builds.
-class PCMQueueOutAudioOutputStream : public AudioOutputStream {
- public:
-  // The ctor takes all the usual parameters, plus |manager|, which is the
-  // audio manager that is creating this object.
- PCMQueueOutAudioOutputStream(AudioManagerMac* manager,
- const AudioParameters& params);
- // The dtor is typically called by the AudioManager only and it is usually
- // triggered by calling AudioOutputStream::Close().
- virtual ~PCMQueueOutAudioOutputStream();
-
- // Implementation of AudioOutputStream.
- virtual bool Open() OVERRIDE;
- virtual void Close() OVERRIDE;
- virtual void Start(AudioSourceCallback* callback) OVERRIDE;
- virtual void Stop() OVERRIDE;
- virtual void SetVolume(double volume) OVERRIDE;
- virtual void GetVolume(double* volume) OVERRIDE;
-
- private:
- // The audio is double buffered.
- static const uint32 kNumBuffers = 2;
- static const int kEmptyChannel = -1;
-
- // Reorder PCM from source layout to device layout found in Core Audio.
- template<class Format>
- void SwizzleLayout(Format* b, uint32 filled);
-  // Check and move channels if the surround sound layout needs adjusting.
- bool CheckForAdjustedLayout(Channels input_channel, Channels output_channel);
-
- // The OS calls back here when an audio buffer has been processed.
- static void RenderCallback(void* p_this, AudioQueueRef queue,
- AudioQueueBufferRef buffer);
- // Called when an error occurs.
- void HandleError(OSStatus err);
-
- // Atomic operations for setting/getting the source callback.
- void SetSource(AudioSourceCallback* source);
- AudioSourceCallback* GetSource();
-
- // Structure that holds the stream format details such as bitrate.
- AudioStreamBasicDescription format_;
- // Handle to the OS audio queue object.
- AudioQueueRef audio_queue_;
- // Array of pointers to the OS managed audio buffers.
- AudioQueueBufferRef buffer_[kNumBuffers];
-  // Mutex for the |source_| to implement atomic set and get.
- // It is important to NOT wait on any other locks while this is held.
- base::Lock source_lock_;
- // Pointer to the object that will provide the audio samples.
- AudioSourceCallback* source_;
- // Our creator, the audio manager needs to be notified when we close.
- AudioManagerMac* manager_;
- // Packet size in bytes.
- uint32 packet_size_;
- // Number of bytes for making a silence buffer.
- int silence_bytes_;
- // Volume level from 0 to 1.
- float volume_;
- // Number of bytes yet to be played in audio buffer.
- uint32 pending_bytes_;
- // Number of channels in the source audio.
- int num_source_channels_;
- // Source's channel layout for surround sound channels.
- ChannelLayout source_layout_;
- // Device's channel layout.
- int core_channel_orderings_[CHANNELS_MAX];
- // An array for remapping source to device channel layouts during a swizzle.
- int channel_remap_[CHANNELS_MAX];
- // Number of channels in device layout.
- int num_core_channels_;
- // A flag to determine if swizzle is needed from source to device layouts.
- bool should_swizzle_;
- // A flag to determine if downmix is needed from source to device layouts.
- bool should_down_mix_;
-
- // Event used for synchronization when stopping the stream.
- // Callback sets it after stream is stopped.
- base::WaitableEvent stopped_event_;
- // When stopping we keep track of number of buffers in flight and
- // signal "stop completed" from the last buffer's callback.
- int num_buffers_left_;
-
- // Container for retrieving data from AudioSourceCallback::OnMoreData().
- scoped_ptr<AudioBus> audio_bus_;
-
- // Channel mixer and temporary bus for the final mixed channel data.
- scoped_ptr<ChannelMixer> channel_mixer_;
- scoped_ptr<AudioBus> mixed_audio_bus_;
-
- DISALLOW_COPY_AND_ASSIGN(PCMQueueOutAudioOutputStream);
-};
-
-} // namespace media
-
-#endif // MEDIA_AUDIO_MAC_AUDIO_OUTPUT_MAC_H_
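The channel_remap_ table and SwizzleLayout() declared above amount to a per-frame permutation of interleaved samples: each destination channel either copies from a source channel or is zero-filled when marked empty. A small self-contained sketch of that remap idea (hypothetical names, not the class's code, and limited here to at most 8 channels):

#include <stdint.h>
#include <string.h>

// Reorders one interleaved frame at a time through a remap table, in the
// spirit of SwizzleLayout(); remap[dst] gives the source channel index, or
// -1 ("empty") to zero-fill that destination channel.
static void RemapInterleaved(int16_t* samples, int frames, int channels,
                             const int* remap) {
  if (channels > 8)
    return;  // Fixed-size scratch below only covers up to 8 channels.
  int16_t scratch[8];
  for (int f = 0; f < frames; ++f, samples += channels) {
    memcpy(scratch, samples, sizeof(int16_t) * channels);
    for (int ch = 0; ch < channels; ++ch)
      samples[ch] = (remap[ch] >= 0) ? scratch[remap[ch]] : 0;
  }
}

// Example use: swap left/right on stereo data.
// const int swap_lr[2] = {1, 0};
// RemapInterleaved(data, frame_count, 2, swap_lr);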
diff --git a/src/media/audio/mac/audio_output_mac_unittest.cc b/src/media/audio/mac/audio_output_mac_unittest.cc
deleted file mode 100644
index 919c17d..0000000
--- a/src/media/audio/mac/audio_output_mac_unittest.cc
+++ /dev/null
@@ -1,165 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/basictypes.h"
-#include "base/memory/scoped_ptr.h"
-#include "media/audio/audio_io.h"
-#include "media/audio/audio_manager.h"
-#include "media/audio/simple_sources.h"
-#include "testing/gmock/include/gmock/gmock.h"
-#include "testing/gtest/include/gtest/gtest.h"
-
-using ::testing::_;
-using ::testing::AnyNumber;
-using ::testing::DoAll;
-using ::testing::Field;
-using ::testing::InSequence;
-using ::testing::Invoke;
-using ::testing::NiceMock;
-using ::testing::NotNull;
-using ::testing::Return;
-
-namespace media {
-
-class MockAudioSource : public AudioOutputStream::AudioSourceCallback {
- public:
- MOCK_METHOD2(OnMoreData, int(AudioBus* audio_bus,
- AudioBuffersState buffers_state));
- MOCK_METHOD3(OnMoreIOData, int(AudioBus* source,
- AudioBus* dest,
- AudioBuffersState buffers_state));
- MOCK_METHOD2(OnError, void(AudioOutputStream* stream, int code));
-};
-
-// ===========================================================================
-// Validation of AudioParameters::AUDIO_PCM_LINEAR
-//
-
-// Test that it can be created and closed.
-TEST(MacAudioTest, PCMWaveStreamGetAndClose) {
- scoped_ptr<AudioManager> audio_man(AudioManager::Create());
- if (!audio_man->HasAudioOutputDevices())
- return;
- AudioOutputStream* oas = audio_man->MakeAudioOutputStream(
- AudioParameters(AudioParameters::AUDIO_PCM_LINEAR, CHANNEL_LAYOUT_STEREO,
- 8000, 16, 1024));
- ASSERT_TRUE(NULL != oas);
- oas->Close();
-}
-
-// Test that it can be opened and closed.
-TEST(MacAudioTest, PCMWaveStreamOpenAndClose) {
- scoped_ptr<AudioManager> audio_man(AudioManager::Create());
- if (!audio_man->HasAudioOutputDevices())
- return;
- AudioOutputStream* oas = audio_man->MakeAudioOutputStream(
- AudioParameters(AudioParameters::AUDIO_PCM_LINEAR, CHANNEL_LAYOUT_STEREO,
- 8000, 16, 1024));
- ASSERT_TRUE(NULL != oas);
- EXPECT_TRUE(oas->Open());
- oas->Close();
-}
-
-// This test produces actual audio for 1.5 seconds on the default wave device at
-// 44.1K s/sec. Parameters have been chosen carefully so you should not hear
-// pops or noises while the sound is playing. The sound must also be identical
-// to the sound of PCMWaveStreamPlay200HzTone22KssMono test.
-TEST(MacAudioTest, PCMWaveStreamPlay200HzTone44KssMono) {
- scoped_ptr<AudioManager> audio_man(AudioManager::Create());
- if (!audio_man->HasAudioOutputDevices())
- return;
- uint32 frames_100_ms = AudioParameters::kAudioCDSampleRate / 10;
- AudioOutputStream* oas = audio_man->MakeAudioOutputStream(
- AudioParameters(AudioParameters::AUDIO_PCM_LINEAR, CHANNEL_LAYOUT_MONO,
- AudioParameters::kAudioCDSampleRate, 16, frames_100_ms));
- ASSERT_TRUE(NULL != oas);
- EXPECT_TRUE(oas->Open());
-
- SineWaveAudioSource source(1, 200.0, AudioParameters::kAudioCDSampleRate);
- oas->SetVolume(0.5);
- oas->Start(&source);
- usleep(500000);
-
- // Test that the volume is within the set limits.
- double volume = 0.0;
- oas->GetVolume(&volume);
- EXPECT_LT(volume, 0.51);
- EXPECT_GT(volume, 0.49);
- oas->Stop();
- oas->Close();
-}
-
-// This test produces actual audio for 1.5 seconds on the default wave device at
-// 22K s/sec. Parameters have been chosen carefully so you should not hear pops
-// or noises while the sound is playing. The sound must also be identical to the
-// sound of PCMWaveStreamPlay200HzTone44KssMono test.
-TEST(MacAudioTest, PCMWaveStreamPlay200HzTone22KssMono) {
- scoped_ptr<AudioManager> audio_man(AudioManager::Create());
- if (!audio_man->HasAudioOutputDevices())
- return;
- uint32 frames_100_ms = AudioParameters::kAudioCDSampleRate / 10;
- AudioOutputStream* oas = audio_man->MakeAudioOutputStream(
- AudioParameters(AudioParameters::AUDIO_PCM_LINEAR, CHANNEL_LAYOUT_MONO,
- AudioParameters::kAudioCDSampleRate / 2, 16,
- frames_100_ms));
- ASSERT_TRUE(NULL != oas);
-
- SineWaveAudioSource source(1, 200.0, AudioParameters::kAudioCDSampleRate/2);
- EXPECT_TRUE(oas->Open());
- oas->Start(&source);
- usleep(1500000);
- oas->Stop();
- oas->Close();
-}
-
-// Custom action to clear a memory buffer.
-static int ClearBuffer(AudioBus* audio_bus,
- AudioBuffersState buffers_state) {
- audio_bus->Zero();
- return audio_bus->frames();
-}
-
-TEST(MacAudioTest, PCMWaveStreamPendingBytes) {
- scoped_ptr<AudioManager> audio_man(AudioManager::Create());
- if (!audio_man->HasAudioOutputDevices())
- return;
-
- uint32 frames_100_ms = AudioParameters::kAudioCDSampleRate / 10;
- AudioOutputStream* oas = audio_man->MakeAudioOutputStream(
- AudioParameters(AudioParameters::AUDIO_PCM_LINEAR, CHANNEL_LAYOUT_MONO,
- AudioParameters::kAudioCDSampleRate, 16, frames_100_ms));
- ASSERT_TRUE(NULL != oas);
-
- NiceMock<MockAudioSource> source;
- EXPECT_TRUE(oas->Open());
-
- uint32 bytes_100_ms = frames_100_ms * 2;
-
-  // We expect the amount of pending bytes to reach |bytes_100_ms|
-  // because the audio output stream has a double buffer scheme.
-  // We will then provide zero data, so the amount of pending bytes
-  // will go down and eventually reach zero.
- InSequence s;
- EXPECT_CALL(source, OnMoreData(NotNull(),
- Field(&AudioBuffersState::pending_bytes, 0)))
- .WillOnce(Invoke(ClearBuffer));
- EXPECT_CALL(source, OnMoreData(NotNull(),
- Field(&AudioBuffersState::pending_bytes,
- bytes_100_ms)))
- .WillOnce(Invoke(ClearBuffer));
- EXPECT_CALL(source, OnMoreData(NotNull(),
- Field(&AudioBuffersState::pending_bytes,
- bytes_100_ms)))
- .WillOnce(Return(0));
- EXPECT_CALL(source, OnMoreData(NotNull(), _))
- .Times(AnyNumber())
- .WillRepeatedly(Return(0));
-
- oas->Start(&source);
- usleep(500000);
- oas->Stop();
- oas->Close();
-}
-
-} // namespace media
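The PCMWaveStreamPendingBytes test above leans on gmock's ordered expectations: InSequence forces the OnMoreData() expectations to be satisfied in declaration order, and WillOnce(Invoke(...)) substitutes a custom action such as ClearBuffer(). A minimal sketch of that pattern with a hypothetical mock, unrelated to audio:

#include "testing/gmock/include/gmock/gmock.h"
#include "testing/gtest/include/gtest/gtest.h"

using ::testing::InSequence;
using ::testing::Invoke;
using ::testing::Return;

// Hypothetical interface and mock, just to show the ordered-expectation idiom.
class Counter {
 public:
  virtual ~Counter() {}
  virtual int Next(int hint) = 0;
};

class MockCounter : public Counter {
 public:
  MOCK_METHOD1(Next, int(int hint));
};

// Custom action used via Invoke(), analogous to ClearBuffer() above.
static int EchoHint(int hint) { return hint; }

TEST(MockPatternTest, OrderedExpectations) {
  MockCounter counter;
  InSequence s;  // Expectations below must be satisfied in declaration order.
  EXPECT_CALL(counter, Next(0)).WillOnce(Invoke(EchoHint));
  EXPECT_CALL(counter, Next(1)).WillOnce(Return(42));

  EXPECT_EQ(0, counter.Next(0));
  EXPECT_EQ(42, counter.Next(1));
}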
diff --git a/src/media/audio/mac/audio_synchronized_mac.cc b/src/media/audio/mac/audio_synchronized_mac.cc
deleted file mode 100644
index 3861bcb..0000000
--- a/src/media/audio/mac/audio_synchronized_mac.cc
+++ /dev/null
@@ -1,945 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "media/audio/mac/audio_synchronized_mac.h"
-
-#include <CoreServices/CoreServices.h>
-#include <algorithm>
-
-#include "base/basictypes.h"
-#include "base/debug/trace_event.h"
-#include "base/logging.h"
-#include "base/mac/mac_logging.h"
-#include "media/audio/audio_util.h"
-#include "media/audio/mac/audio_manager_mac.h"
-
-namespace media {
-
-static const int kHardwareBufferSize = 128;
-static const int kFifoSize = 16384;
-
-// TODO(crogers): handle the non-stereo case.
-static const int kChannels = 2;
-
-// This value was determined empirically for minimum latency while still
-// guarding against FIFO under-runs.
-static const int kBaseTargetFifoFrames = 256 + 64;
-
-// If the input and output sample-rate don't match, then we need to maintain
-// an additional safety margin due to the callback timing jitter and the
-// varispeed buffering. This value was empirically tuned.
-static const int kAdditionalTargetFifoFrames = 128;
-
-static void ZeroBufferList(AudioBufferList* buffer_list) {
- for (size_t i = 0; i < buffer_list->mNumberBuffers; ++i)
- memset(buffer_list->mBuffers[i].mData,
- 0,
- buffer_list->mBuffers[i].mDataByteSize);
-}
-
-static void WrapBufferList(AudioBufferList* buffer_list,
- AudioBus* bus,
- int frames) {
- DCHECK(buffer_list);
- DCHECK(bus);
- int channels = bus->channels();
- int buffer_list_channels = buffer_list->mNumberBuffers;
-
- // Copy pointers from AudioBufferList.
- int source_idx = 0;
- for (int i = 0; i < channels; ++i) {
- bus->SetChannelData(
- i, static_cast<float*>(buffer_list->mBuffers[source_idx].mData));
-
- // It's ok to pass in a |buffer_list| with fewer channels, in which
- // case we just duplicate the last channel.
- if (source_idx < buffer_list_channels - 1)
- ++source_idx;
- }
-
- // Finally set the actual length.
- bus->set_frames(frames);
-}
-
-AudioSynchronizedStream::AudioSynchronizedStream(
- AudioManagerMac* manager,
- const AudioParameters& params,
- AudioDeviceID input_id,
- AudioDeviceID output_id)
- : manager_(manager),
- params_(params),
- input_sample_rate_(0),
- output_sample_rate_(0),
- input_id_(input_id),
- output_id_(output_id),
- input_buffer_list_(NULL),
- fifo_(kChannels, kFifoSize),
- target_fifo_frames_(kBaseTargetFifoFrames),
- average_delta_(0.0),
- fifo_rate_compensation_(1.0),
- input_unit_(0),
- varispeed_unit_(0),
- output_unit_(0),
- first_input_time_(-1),
- is_running_(false),
- hardware_buffer_size_(kHardwareBufferSize),
- channels_(kChannels) {
-}
-
-AudioSynchronizedStream::~AudioSynchronizedStream() {
- DCHECK(!input_unit_);
- DCHECK(!output_unit_);
- DCHECK(!varispeed_unit_);
-}
-
-bool AudioSynchronizedStream::Open() {
- if (params_.channels() != kChannels) {
- LOG(ERROR) << "Only stereo output is currently supported.";
- return false;
- }
-
- // Create the input, output, and varispeed AudioUnits.
- OSStatus result = CreateAudioUnits();
- if (result != noErr) {
- LOG(ERROR) << "Cannot create AudioUnits.";
- return false;
- }
-
- result = SetupInput(input_id_);
- if (result != noErr) {
- LOG(ERROR) << "Error configuring input AudioUnit.";
- return false;
- }
-
- result = SetupOutput(output_id_);
- if (result != noErr) {
- LOG(ERROR) << "Error configuring output AudioUnit.";
- return false;
- }
-
- result = SetupCallbacks();
- if (result != noErr) {
- LOG(ERROR) << "Error setting up callbacks on AudioUnits.";
- return false;
- }
-
- result = SetupStreamFormats();
- if (result != noErr) {
- LOG(ERROR) << "Error configuring stream formats on AudioUnits.";
- return false;
- }
-
- AllocateInputData();
-
- // Final initialization of the AudioUnits.
- result = AudioUnitInitialize(input_unit_);
- if (result != noErr) {
- LOG(ERROR) << "Error initializing input AudioUnit.";
- return false;
- }
-
- result = AudioUnitInitialize(output_unit_);
- if (result != noErr) {
- LOG(ERROR) << "Error initializing output AudioUnit.";
- return false;
- }
-
- result = AudioUnitInitialize(varispeed_unit_);
- if (result != noErr) {
- LOG(ERROR) << "Error initializing varispeed AudioUnit.";
- return false;
- }
-
- if (input_sample_rate_ != output_sample_rate_) {
- // Add extra safety margin.
- target_fifo_frames_ += kAdditionalTargetFifoFrames;
- }
-
- // Buffer initial silence corresponding to target I/O buffering.
- fifo_.Clear();
- scoped_ptr<AudioBus> silence =
- AudioBus::Create(channels_, target_fifo_frames_);
- silence->Zero();
- fifo_.Push(silence.get());
-
- return true;
-}
-
-void AudioSynchronizedStream::Close() {
- DCHECK(!is_running_);
-
- if (input_buffer_list_) {
- free(input_buffer_list_);
- input_buffer_list_ = 0;
- input_bus_.reset(NULL);
- wrapper_bus_.reset(NULL);
- }
-
- if (input_unit_) {
- AudioUnitUninitialize(input_unit_);
- CloseComponent(input_unit_);
- }
-
- if (output_unit_) {
- AudioUnitUninitialize(output_unit_);
- CloseComponent(output_unit_);
- }
-
- if (varispeed_unit_) {
- AudioUnitUninitialize(varispeed_unit_);
- CloseComponent(varispeed_unit_);
- }
-
- input_unit_ = NULL;
- output_unit_ = NULL;
- varispeed_unit_ = NULL;
-
- // Inform the audio manager that we have been closed. This can cause our
- // destruction.
- manager_->ReleaseOutputStream(this);
-}
-
-void AudioSynchronizedStream::Start(AudioSourceCallback* callback) {
- DCHECK(callback);
- DCHECK(input_unit_);
- DCHECK(output_unit_);
- DCHECK(varispeed_unit_);
-
- if (is_running_ || !input_unit_ || !output_unit_ || !varispeed_unit_)
- return;
-
- source_ = callback;
-
- // Reset state variables each time we Start().
- fifo_rate_compensation_ = 1.0;
- average_delta_ = 0.0;
-
- OSStatus result = noErr;
-
- if (!is_running_) {
- first_input_time_ = -1;
-
- result = AudioOutputUnitStart(input_unit_);
- OSSTATUS_DCHECK(result == noErr, result);
-
- if (result == noErr) {
- result = AudioOutputUnitStart(output_unit_);
- OSSTATUS_DCHECK(result == noErr, result);
- }
- }
-
- is_running_ = true;
-}
-
-void AudioSynchronizedStream::Stop() {
- OSStatus result = noErr;
- if (is_running_) {
- result = AudioOutputUnitStop(input_unit_);
- OSSTATUS_DCHECK(result == noErr, result);
-
- if (result == noErr) {
- result = AudioOutputUnitStop(output_unit_);
- OSSTATUS_DCHECK(result == noErr, result);
- }
- }
-
- if (result == noErr)
- is_running_ = false;
-}
-
-bool AudioSynchronizedStream::IsRunning() {
- return is_running_;
-}
-
-// TODO(crogers): implement - or remove from AudioOutputStream.
-void AudioSynchronizedStream::SetVolume(double volume) {}
-void AudioSynchronizedStream::GetVolume(double* volume) {}
-
-OSStatus AudioSynchronizedStream::SetOutputDeviceAsCurrent(
- AudioDeviceID output_id) {
- OSStatus result = noErr;
-
- // Get the default output device if device is unknown.
- if (output_id == kAudioDeviceUnknown) {
- AudioObjectPropertyAddress pa;
- pa.mSelector = kAudioHardwarePropertyDefaultOutputDevice;
- pa.mScope = kAudioObjectPropertyScopeGlobal;
- pa.mElement = kAudioObjectPropertyElementMaster;
- UInt32 size = sizeof(output_id);
-
- result = AudioObjectGetPropertyData(
- kAudioObjectSystemObject,
- &pa,
- 0,
- 0,
- &size,
- &output_id);
-
- OSSTATUS_DCHECK(result == noErr, result);
- if (result != noErr)
- return result;
- }
-
- // Set the render frame size.
- UInt32 frame_size = hardware_buffer_size_;
- AudioObjectPropertyAddress pa;
- pa.mSelector = kAudioDevicePropertyBufferFrameSize;
- pa.mScope = kAudioDevicePropertyScopeInput;
- pa.mElement = kAudioObjectPropertyElementMaster;
- result = AudioObjectSetPropertyData(
- output_id,
- &pa,
- 0,
- 0,
- sizeof(frame_size),
- &frame_size);
-
- OSSTATUS_DCHECK(result == noErr, result);
- if (result != noErr)
- return result;
-
- output_info_.Initialize(output_id, false);
-
- // Set the Current Device to the Default Output Unit.
- result = AudioUnitSetProperty(
- output_unit_,
- kAudioOutputUnitProperty_CurrentDevice,
- kAudioUnitScope_Global,
- 0,
- &output_info_.id_,
- sizeof(output_info_.id_));
-
- OSSTATUS_DCHECK(result == noErr, result);
- return result;
-}
-
-OSStatus AudioSynchronizedStream::SetInputDeviceAsCurrent(
- AudioDeviceID input_id) {
- OSStatus result = noErr;
-
- // Get the default input device if device is unknown.
- if (input_id == kAudioDeviceUnknown) {
- AudioObjectPropertyAddress pa;
- pa.mSelector = kAudioHardwarePropertyDefaultInputDevice;
- pa.mScope = kAudioObjectPropertyScopeGlobal;
- pa.mElement = kAudioObjectPropertyElementMaster;
- UInt32 size = sizeof(input_id);
-
- result = AudioObjectGetPropertyData(
- kAudioObjectSystemObject,
- &pa,
- 0,
- 0,
- &size,
- &input_id);
-
- OSSTATUS_DCHECK(result == noErr, result);
- if (result != noErr)
- return result;
- }
-
- // Set the render frame size.
- UInt32 frame_size = hardware_buffer_size_;
- AudioObjectPropertyAddress pa;
- pa.mSelector = kAudioDevicePropertyBufferFrameSize;
- pa.mScope = kAudioDevicePropertyScopeInput;
- pa.mElement = kAudioObjectPropertyElementMaster;
- result = AudioObjectSetPropertyData(
- input_id,
- &pa,
- 0,
- 0,
- sizeof(frame_size),
- &frame_size);
-
- OSSTATUS_DCHECK(result == noErr, result);
- if (result != noErr)
- return result;
-
- input_info_.Initialize(input_id, true);
-
- // Set the Current Device to the AUHAL.
- // This should be done only after I/O has been enabled on the AUHAL.
- result = AudioUnitSetProperty(
- input_unit_,
- kAudioOutputUnitProperty_CurrentDevice,
- kAudioUnitScope_Global,
- 0,
- &input_info_.id_,
- sizeof(input_info_.id_));
-
- OSSTATUS_DCHECK(result == noErr, result);
- return result;
-}
-
-OSStatus AudioSynchronizedStream::CreateAudioUnits() {
- // Q: Why do we need a varispeed unit?
- // A: If the input device and the output device are running at
- // different sample rates and/or on different clocks, we will need
- // to compensate to avoid a pitch change and
-  // to avoid buffer under-runs and over-runs.
- ComponentDescription varispeed_desc;
- varispeed_desc.componentType = kAudioUnitType_FormatConverter;
- varispeed_desc.componentSubType = kAudioUnitSubType_Varispeed;
- varispeed_desc.componentManufacturer = kAudioUnitManufacturer_Apple;
- varispeed_desc.componentFlags = 0;
- varispeed_desc.componentFlagsMask = 0;
-
- Component varispeed_comp = FindNextComponent(NULL, &varispeed_desc);
- if (varispeed_comp == NULL)
- return -1;
-
- OSStatus result = OpenAComponent(varispeed_comp, &varispeed_unit_);
- OSSTATUS_DCHECK(result == noErr, result);
- if (result != noErr)
- return result;
-
- // Open input AudioUnit.
- ComponentDescription input_desc;
- input_desc.componentType = kAudioUnitType_Output;
- input_desc.componentSubType = kAudioUnitSubType_HALOutput;
- input_desc.componentManufacturer = kAudioUnitManufacturer_Apple;
- input_desc.componentFlags = 0;
- input_desc.componentFlagsMask = 0;
-
- Component input_comp = FindNextComponent(NULL, &input_desc);
- if (input_comp == NULL)
- return -1;
-
- result = OpenAComponent(input_comp, &input_unit_);
- OSSTATUS_DCHECK(result == noErr, result);
- if (result != noErr)
- return result;
-
- // Open output AudioUnit.
- ComponentDescription output_desc;
- output_desc.componentType = kAudioUnitType_Output;
- output_desc.componentSubType = kAudioUnitSubType_DefaultOutput;
- output_desc.componentManufacturer = kAudioUnitManufacturer_Apple;
- output_desc.componentFlags = 0;
- output_desc.componentFlagsMask = 0;
-
- Component output_comp = FindNextComponent(NULL, &output_desc);
- if (output_comp == NULL)
- return -1;
-
- result = OpenAComponent(output_comp, &output_unit_);
- OSSTATUS_DCHECK(result == noErr, result);
- if (result != noErr)
- return result;
-
- return noErr;
-}
-
-OSStatus AudioSynchronizedStream::SetupInput(AudioDeviceID input_id) {
- // The AUHAL used for input needs to be initialized
- // before anything is done to it.
- OSStatus result = AudioUnitInitialize(input_unit_);
- OSSTATUS_DCHECK(result == noErr, result);
- if (result != noErr)
- return result;
-
- // We must enable the Audio Unit (AUHAL) for input and disable output
- // BEFORE setting the AUHAL's current device.
- result = EnableIO();
- OSSTATUS_DCHECK(result == noErr, result);
- if (result != noErr)
- return result;
-
- result = SetInputDeviceAsCurrent(input_id);
- OSSTATUS_DCHECK(result == noErr, result);
-
- return result;
-}
-
-OSStatus AudioSynchronizedStream::EnableIO() {
- // Enable input on the AUHAL.
- UInt32 enable_io = 1;
- OSStatus result = AudioUnitSetProperty(
- input_unit_,
- kAudioOutputUnitProperty_EnableIO,
- kAudioUnitScope_Input,
- 1, // input element
- &enable_io,
- sizeof(enable_io));
-
- OSSTATUS_DCHECK(result == noErr, result);
- if (result != noErr)
- return result;
-
- // Disable Output on the AUHAL.
- enable_io = 0;
- result = AudioUnitSetProperty(
- input_unit_,
- kAudioOutputUnitProperty_EnableIO,
- kAudioUnitScope_Output,
- 0, // output element
- &enable_io,
- sizeof(enable_io));
-
- OSSTATUS_DCHECK(result == noErr, result);
- return result;
-}
-
-OSStatus AudioSynchronizedStream::SetupOutput(AudioDeviceID output_id) {
- OSStatus result = noErr;
-
- result = SetOutputDeviceAsCurrent(output_id);
- OSSTATUS_DCHECK(result == noErr, result);
- if (result != noErr)
- return result;
-
- // Tell the output unit not to reset timestamps.
- // Otherwise sample rate changes will cause sync loss.
- UInt32 start_at_zero = 0;
- result = AudioUnitSetProperty(
- output_unit_,
- kAudioOutputUnitProperty_StartTimestampsAtZero,
- kAudioUnitScope_Global,
- 0,
- &start_at_zero,
- sizeof(start_at_zero));
-
- OSSTATUS_DCHECK(result == noErr, result);
-
- return result;
-}
-
-OSStatus AudioSynchronizedStream::SetupCallbacks() {
- // Set the input callback.
- AURenderCallbackStruct callback;
- callback.inputProc = InputProc;
- callback.inputProcRefCon = this;
- OSStatus result = AudioUnitSetProperty(
- input_unit_,
- kAudioOutputUnitProperty_SetInputCallback,
- kAudioUnitScope_Global,
- 0,
- &callback,
- sizeof(callback));
-
- OSSTATUS_DCHECK(result == noErr, result);
- if (result != noErr)
- return result;
-
- // Set the output callback.
- callback.inputProc = OutputProc;
- callback.inputProcRefCon = this;
- result = AudioUnitSetProperty(
- output_unit_,
- kAudioUnitProperty_SetRenderCallback,
- kAudioUnitScope_Input,
- 0,
- &callback,
- sizeof(callback));
-
- OSSTATUS_DCHECK(result == noErr, result);
- if (result != noErr)
- return result;
-
- // Set the varispeed callback.
- callback.inputProc = VarispeedProc;
- callback.inputProcRefCon = this;
- result = AudioUnitSetProperty(
- varispeed_unit_,
- kAudioUnitProperty_SetRenderCallback,
- kAudioUnitScope_Input,
- 0,
- &callback,
- sizeof(callback));
-
- OSSTATUS_DCHECK(result == noErr, result);
-
- return result;
-}
-
-OSStatus AudioSynchronizedStream::SetupStreamFormats() {
- AudioStreamBasicDescription asbd, asbd_dev1_in, asbd_dev2_out;
-
- // Get the Stream Format (Output client side).
- UInt32 property_size = sizeof(asbd_dev1_in);
- OSStatus result = AudioUnitGetProperty(
- input_unit_,
- kAudioUnitProperty_StreamFormat,
- kAudioUnitScope_Input,
- 1,
- &asbd_dev1_in,
- &property_size);
-
- OSSTATUS_DCHECK(result == noErr, result);
- if (result != noErr)
- return result;
-
- // Get the Stream Format (client side).
- property_size = sizeof(asbd);
- result = AudioUnitGetProperty(
- input_unit_,
- kAudioUnitProperty_StreamFormat,
- kAudioUnitScope_Output,
- 1,
- &asbd,
- &property_size);
-
- OSSTATUS_DCHECK(result == noErr, result);
- if (result != noErr)
- return result;
-
- // Get the Stream Format (Output client side).
- property_size = sizeof(asbd_dev2_out);
- result = AudioUnitGetProperty(
- output_unit_,
- kAudioUnitProperty_StreamFormat,
- kAudioUnitScope_Output,
- 0,
- &asbd_dev2_out,
- &property_size);
-
- OSSTATUS_DCHECK(result == noErr, result);
- if (result != noErr)
- return result;
-
- // Set the format of all the AUs to the input/output devices channel count.
-  // For a simple case, you want to set this to the lower of the channel
-  // counts of the input device and the output device.
- asbd.mChannelsPerFrame = std::min(asbd_dev1_in.mChannelsPerFrame,
- asbd_dev2_out.mChannelsPerFrame);
-
- // We must get the sample rate of the input device and set it to the
- // stream format of AUHAL.
- Float64 rate = 0;
- property_size = sizeof(rate);
-
- AudioObjectPropertyAddress pa;
- pa.mSelector = kAudioDevicePropertyNominalSampleRate;
- pa.mScope = kAudioObjectPropertyScopeWildcard;
- pa.mElement = kAudioObjectPropertyElementMaster;
- result = AudioObjectGetPropertyData(
- input_info_.id_,
- &pa,
- 0,
- 0,
- &property_size,
- &rate);
-
- OSSTATUS_DCHECK(result == noErr, result);
- if (result != noErr)
- return result;
-
- input_sample_rate_ = rate;
-
- asbd.mSampleRate = rate;
- property_size = sizeof(asbd);
-
- // Set the new formats to the AUs...
- result = AudioUnitSetProperty(
- input_unit_,
- kAudioUnitProperty_StreamFormat,
- kAudioUnitScope_Output,
- 1,
- &asbd,
- property_size);
-
- OSSTATUS_DCHECK(result == noErr, result);
- if (result != noErr)
- return result;
-
- result = AudioUnitSetProperty(
- varispeed_unit_,
- kAudioUnitProperty_StreamFormat,
- kAudioUnitScope_Input,
- 0,
- &asbd,
- property_size);
-
- OSSTATUS_DCHECK(result == noErr, result);
- if (result != noErr)
- return result;
-
- // Set the correct sample rate for the output device,
- // but keep the channel count the same.
- property_size = sizeof(rate);
-
- pa.mSelector = kAudioDevicePropertyNominalSampleRate;
- pa.mScope = kAudioObjectPropertyScopeWildcard;
- pa.mElement = kAudioObjectPropertyElementMaster;
- result = AudioObjectGetPropertyData(
- output_info_.id_,
- &pa,
- 0,
- 0,
- &property_size,
- &rate);
-
- OSSTATUS_DCHECK(result == noErr, result);
- if (result != noErr)
- return result;
-
- output_sample_rate_ = rate;
-
- // The requested sample-rate must match the hardware sample-rate.
- if (output_sample_rate_ != params_.sample_rate()) {
- LOG(ERROR) << "Requested sample-rate: " << params_.sample_rate()
- << " must match the hardware sample-rate: " << output_sample_rate_;
- return kAudioDeviceUnsupportedFormatError;
- }
-
- asbd.mSampleRate = rate;
- property_size = sizeof(asbd);
-
- // Set the new audio stream formats for the rest of the AUs...
- result = AudioUnitSetProperty(
- varispeed_unit_,
- kAudioUnitProperty_StreamFormat,
- kAudioUnitScope_Output,
- 0,
- &asbd,
- property_size);
-
- OSSTATUS_DCHECK(result == noErr, result);
- if (result != noErr)
- return result;
-
- result = AudioUnitSetProperty(
- output_unit_,
- kAudioUnitProperty_StreamFormat,
- kAudioUnitScope_Input,
- 0,
- &asbd,
- property_size);
-
- OSSTATUS_DCHECK(result == noErr, result);
- return result;
-}
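
SetupStreamFormats() negotiates one shared format: the lower of the two devices' channel counts, combined with the input device's nominal sample rate, applied to every AU in the chain. As a hedged sketch of building such a format from scratch (the deleted code instead reuses the client-side format it queried and only overrides the channel count and sample rate), a canonical Float32, non-interleaved ASBD looks like this:

// Sketch: a canonical Float32, non-interleaved AudioStreamBasicDescription.
// |sample_rate| and |channels| would come from the device queries above.
#include <CoreAudio/CoreAudioTypes.h>

AudioStreamBasicDescription MakeFloat32NonInterleavedFormat(Float64 sample_rate,
                                                            UInt32 channels) {
  AudioStreamBasicDescription asbd = {0};
  asbd.mSampleRate = sample_rate;
  asbd.mFormatID = kAudioFormatLinearPCM;
  asbd.mFormatFlags =
      kAudioFormatFlagsNativeFloatPacked | kAudioFormatFlagIsNonInterleaved;
  asbd.mBitsPerChannel = 32;
  asbd.mChannelsPerFrame = channels;
  asbd.mFramesPerPacket = 1;
  // Non-interleaved: each buffer holds one channel, so the per-frame and
  // per-packet byte counts describe a single channel.
  asbd.mBytesPerFrame = sizeof(Float32);
  asbd.mBytesPerPacket = asbd.mBytesPerFrame * asbd.mFramesPerPacket;
  return asbd;
}
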
-
-void AudioSynchronizedStream::AllocateInputData() {
- // Allocate storage for the AudioBufferList used for the
- // input data from the input AudioUnit.
-  // We allocate enough space for one AudioBuffer per channel.
- size_t malloc_size = offsetof(AudioBufferList, mBuffers[0]) +
- (sizeof(AudioBuffer) * channels_);
-
- input_buffer_list_ = static_cast<AudioBufferList*>(malloc(malloc_size));
- input_buffer_list_->mNumberBuffers = channels_;
-
- input_bus_ = AudioBus::Create(channels_, hardware_buffer_size_);
- wrapper_bus_ = AudioBus::CreateWrapper(channels_);
-
- // Allocate buffers for AudioBufferList.
- UInt32 buffer_size_bytes = input_bus_->frames() * sizeof(Float32);
- for (size_t i = 0; i < input_buffer_list_->mNumberBuffers; ++i) {
- input_buffer_list_->mBuffers[i].mNumberChannels = 1;
- input_buffer_list_->mBuffers[i].mDataByteSize = buffer_size_bytes;
- input_buffer_list_->mBuffers[i].mData = input_bus_->channel(i);
- }
-}
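
AllocateInputData() sizes the AudioBufferList with offsetof() so that its per-channel buffers can point straight at the AudioBus channel memory, avoiding a copy in the input callback. A minimal sketch of that wrapping idiom (WrapChannels is a hypothetical helper, not part of this change):

// Sketch: point an AudioBufferList at existing planar channel data, one
// buffer per channel, without copying samples.
#include <CoreAudio/CoreAudio.h>
#include <cstddef>
#include <cstdlib>

AudioBufferList* WrapChannels(float** channels, int channel_count, int frames) {
  const size_t size = offsetof(AudioBufferList, mBuffers[0]) +
                      sizeof(AudioBuffer) * channel_count;
  AudioBufferList* list = static_cast<AudioBufferList*>(malloc(size));
  list->mNumberBuffers = channel_count;
  for (int i = 0; i < channel_count; ++i) {
    list->mBuffers[i].mNumberChannels = 1;  // Planar: one channel per buffer.
    list->mBuffers[i].mDataByteSize = frames * sizeof(float);
    list->mBuffers[i].mData = channels[i];
  }
  return list;  // Caller releases with free(); the channel data is not owned.
}
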
-
-OSStatus AudioSynchronizedStream::HandleInputCallback(
- AudioUnitRenderActionFlags* io_action_flags,
- const AudioTimeStamp* time_stamp,
- UInt32 bus_number,
- UInt32 number_of_frames,
- AudioBufferList* io_data) {
- TRACE_EVENT0("audio", "AudioSynchronizedStream::HandleInputCallback");
-
- if (first_input_time_ < 0.0)
- first_input_time_ = time_stamp->mSampleTime;
-
- // Get the new audio input data.
- OSStatus result = AudioUnitRender(
- input_unit_,
- io_action_flags,
- time_stamp,
- bus_number,
- number_of_frames,
- input_buffer_list_);
-
- OSSTATUS_DCHECK(result == noErr, result);
- if (result != noErr)
- return result;
-
- // Buffer input into FIFO.
- int available_frames = fifo_.max_frames() - fifo_.frames();
- if (input_bus_->frames() <= available_frames)
- fifo_.Push(input_bus_.get());
-
- return result;
-}
-
-OSStatus AudioSynchronizedStream::HandleVarispeedCallback(
- AudioUnitRenderActionFlags* io_action_flags,
- const AudioTimeStamp* time_stamp,
- UInt32 bus_number,
- UInt32 number_of_frames,
- AudioBufferList* io_data) {
- // Create a wrapper bus on the AudioBufferList.
- WrapBufferList(io_data, wrapper_bus_.get(), number_of_frames);
-
- if (fifo_.frames() < static_cast<int>(number_of_frames)) {
- // We don't DCHECK here, since this is a possible run-time condition
- // if the machine is bogged down.
- wrapper_bus_->Zero();
- return noErr;
- }
-
- // Read from the FIFO to feed the varispeed.
- fifo_.Consume(wrapper_bus_.get(), 0, number_of_frames);
-
- return noErr;
-}
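
The two callbacks above form a producer/consumer pair around the FIFO: the input side drops a block when the FIFO is full, and the varispeed side outputs silence on underrun instead of asserting. A small sketch of that hand-off, assuming the media::AudioFifo interface used in this file (Push, Consume, frames, max_frames):

// Sketch of the FIFO hand-off between the input and varispeed callbacks.
#include "media/base/audio_bus.h"
#include "media/base/audio_fifo.h"

// Producer side: drop the captured block if it does not fit.
void PushCaptured(media::AudioFifo* fifo, const media::AudioBus* captured) {
  if (captured->frames() <= fifo->max_frames() - fifo->frames())
    fifo->Push(captured);
}

// Consumer side: zero-fill on underrun; this is a normal run-time condition.
void PopForPlayback(media::AudioFifo* fifo, media::AudioBus* dest) {
  if (fifo->frames() < dest->frames()) {
    dest->Zero();
    return;
  }
  fifo->Consume(dest, 0, dest->frames());
}
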
-
-OSStatus AudioSynchronizedStream::HandleOutputCallback(
- AudioUnitRenderActionFlags* io_action_flags,
- const AudioTimeStamp* time_stamp,
- UInt32 bus_number,
- UInt32 number_of_frames,
- AudioBufferList* io_data) {
- if (first_input_time_ < 0.0) {
- // Input callback hasn't run yet -> silence.
- ZeroBufferList(io_data);
- return noErr;
- }
-
- // Use the varispeed playback rate to offset small discrepancies
- // in hardware clocks, and also any differences in sample-rate
- // between input and output devices.
-
- // Calculate a varispeed rate scalar factor to compensate for drift between
- // input and output. We use the actual number of frames still in the FIFO
- // compared with the ideal value of |target_fifo_frames_|.
- int delta = fifo_.frames() - target_fifo_frames_;
-
- // Average |delta| because it can jitter back/forth quite frequently
- // by +/- the hardware buffer-size *if* the input and output callbacks are
- // happening at almost exactly the same time. Also, if the input and output
- // sample-rates are different then |delta| will jitter quite a bit due to
- // the rate conversion happening in the varispeed, plus the jittering of
- // the callbacks. The average value is what's important here.
- average_delta_ += (delta - average_delta_) * 0.1;
-
- // Compute a rate compensation which always attracts us back to the
- // |target_fifo_frames_| over a period of kCorrectionTimeSeconds.
- const double kCorrectionTimeSeconds = 0.1;
- double correction_time_frames = kCorrectionTimeSeconds * output_sample_rate_;
- fifo_rate_compensation_ =
- (correction_time_frames + average_delta_) / correction_time_frames;
-
- // Adjust for FIFO drift.
- OSStatus result = AudioUnitSetParameter(
- varispeed_unit_,
- kVarispeedParam_PlaybackRate,
- kAudioUnitScope_Global,
- 0,
- fifo_rate_compensation_,
- 0);
-
- OSSTATUS_DCHECK(result == noErr, result);
- if (result != noErr)
- return result;
-
- // Render to the output using the varispeed.
- result = AudioUnitRender(
- varispeed_unit_,
- io_action_flags,
- time_stamp,
- 0,
- number_of_frames,
- io_data);
-
- OSSTATUS_DCHECK(result == noErr, result);
- if (result != noErr)
- return result;
-
- // Create a wrapper bus on the AudioBufferList.
- WrapBufferList(io_data, wrapper_bus_.get(), number_of_frames);
-
- // Process in-place!
- source_->OnMoreIOData(wrapper_bus_.get(),
- wrapper_bus_.get(),
- AudioBuffersState(0, 0));
-
- return noErr;
-}
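
The drift handling in HandleOutputCallback() reduces to two steps: exponentially smooth the FIFO-level error, then derive a playback-rate scalar that pulls the FIFO back to its target over kCorrectionTimeSeconds. The same arithmetic, isolated into a hypothetical helper class for clarity:

// Sketch of the varispeed rate-compensation math used above.
class FifoDriftCompensator {
 public:
  FifoDriftCompensator(int target_fifo_frames, double output_sample_rate)
      : target_fifo_frames_(target_fifo_frames),
        output_sample_rate_(output_sample_rate),
        average_delta_(0.0) {}

  // Returns the playback-rate scalar for the current FIFO fill level.
  double OnOutputCallback(int fifo_frames) {
    const int delta = fifo_frames - target_fifo_frames_;
    // Smooth the error; the raw value jitters by up to a hardware buffer.
    average_delta_ += (delta - average_delta_) * 0.1;
    // Attract the FIFO back to its target over kCorrectionTimeSeconds.
    const double kCorrectionTimeSeconds = 0.1;
    const double correction_frames =
        kCorrectionTimeSeconds * output_sample_rate_;
    return (correction_frames + average_delta_) / correction_frames;
  }

 private:
  int target_fifo_frames_;
  double output_sample_rate_;
  double average_delta_;
};

A rate slightly above 1.0 drains a FIFO that is running full, and a rate slightly below 1.0 lets it refill, which is exactly the behavior the comment block above describes.
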
-
-OSStatus AudioSynchronizedStream::InputProc(
- void* user_data,
- AudioUnitRenderActionFlags* io_action_flags,
- const AudioTimeStamp* time_stamp,
- UInt32 bus_number,
- UInt32 number_of_frames,
- AudioBufferList* io_data) {
- AudioSynchronizedStream* stream =
- static_cast<AudioSynchronizedStream*>(user_data);
- DCHECK(stream);
-
- return stream->HandleInputCallback(
- io_action_flags,
- time_stamp,
- bus_number,
- number_of_frames,
- io_data);
-}
-
-OSStatus AudioSynchronizedStream::VarispeedProc(
- void* user_data,
- AudioUnitRenderActionFlags* io_action_flags,
- const AudioTimeStamp* time_stamp,
- UInt32 bus_number,
- UInt32 number_of_frames,
- AudioBufferList* io_data) {
- AudioSynchronizedStream* stream =
- static_cast<AudioSynchronizedStream*>(user_data);
- DCHECK(stream);
-
- return stream->HandleVarispeedCallback(
- io_action_flags,
- time_stamp,
- bus_number,
- number_of_frames,
- io_data);
-}
-
-OSStatus AudioSynchronizedStream::OutputProc(
- void* user_data,
- AudioUnitRenderActionFlags* io_action_flags,
- const AudioTimeStamp* time_stamp,
- UInt32 bus_number,
- UInt32 number_of_frames,
- AudioBufferList* io_data) {
- AudioSynchronizedStream* stream =
- static_cast<AudioSynchronizedStream*>(user_data);
- DCHECK(stream);
-
- return stream->HandleOutputCallback(
- io_action_flags,
- time_stamp,
- bus_number,
- number_of_frames,
- io_data);
-}
-
-void AudioSynchronizedStream::AudioDeviceInfo::Initialize(
- AudioDeviceID id, bool is_input) {
- id_ = id;
- is_input_ = is_input;
- if (id_ == kAudioDeviceUnknown)
- return;
-
- UInt32 property_size = sizeof(buffer_size_frames_);
-
- AudioObjectPropertyAddress pa;
- pa.mSelector = kAudioDevicePropertyBufferFrameSize;
- pa.mScope = kAudioObjectPropertyScopeWildcard;
- pa.mElement = kAudioObjectPropertyElementMaster;
- OSStatus result = AudioObjectGetPropertyData(
- id_,
- &pa,
- 0,
- 0,
- &property_size,
- &buffer_size_frames_);
-
- OSSTATUS_DCHECK(result == noErr, result);
-}
-
-} // namespace media
diff --git a/src/media/audio/mac/audio_synchronized_mac.h b/src/media/audio/mac/audio_synchronized_mac.h
deleted file mode 100644
index e99d9c8..0000000
--- a/src/media/audio/mac/audio_synchronized_mac.h
+++ /dev/null
@@ -1,210 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef MEDIA_AUDIO_MAC_AUDIO_SYNCHRONIZED_MAC_H_
-#define MEDIA_AUDIO_MAC_AUDIO_SYNCHRONIZED_MAC_H_
-
-#include <AudioToolbox/AudioToolbox.h>
-#include <AudioUnit/AudioUnit.h>
-#include <CoreAudio/CoreAudio.h>
-
-#include "base/compiler_specific.h"
-#include "base/synchronization/lock.h"
-#include "media/audio/audio_io.h"
-#include "media/audio/audio_parameters.h"
-#include "media/base/audio_bus.h"
-#include "media/base/audio_fifo.h"
-
-namespace media {
-
-class AudioManagerMac;
-
-// AudioSynchronizedStream allows arbitrary combinations of input and output
-// devices running off different clocks and using different drivers, with
-// potentially differing sample-rates. It implements AudioOutputStream
-// and shuttles its synchronized I/O data using AudioSourceCallback.
-//
-// Callers must first query the native sample rate of the selected output
-// device and then use that same rate when creating this object.
-//
-// ............................................................................
-// Theory of Operation:
-// .
-// INPUT THREAD . OUTPUT THREAD
-// +-----------------+ +------+ .
-// | Input AudioUnit | --> | | .
-// +-----------------+ | | .
-// | FIFO | .
-// | | +-----------+
-// | | -----> | Varispeed |
-// | | +-----------+
-// +------+ . |
-// . | +-----------+
-// . OnMoreIOData() --> | Output AU |
-// . +-----------+
-//
-// The input AudioUnit's InputProc is called on one thread which feeds the
-// FIFO. The output AudioUnit's OutputProc is called on a second thread
-// which pulls on the varispeed to get the current input data. The varispeed
-// handles mismatches between input and output sample-rate and also clock drift
-// between the input and output drivers. The varispeed consumes its data from
-// the FIFO and adjusts its rate dynamically according to the amount
-// of data buffered in the FIFO. If the FIFO starts getting too much data
-// buffered then the varispeed will speed up slightly to compensate
-// and similarly if the FIFO doesn't have enough data buffered then the
-// varispeed will slow down slightly.
-//
-// Finally, once the input data is available then OnMoreIOData() is called
-// which is given this input, and renders the output which is finally sent
-// to the Output AudioUnit.
-class AudioSynchronizedStream : public AudioOutputStream {
- public:
-  // The ctor takes all the usual parameters, plus |manager|, which is the
-  // audio manager that is creating this object.
- AudioSynchronizedStream(AudioManagerMac* manager,
- const AudioParameters& params,
- AudioDeviceID input_id,
- AudioDeviceID output_id);
-
- virtual ~AudioSynchronizedStream();
-
- // Implementation of AudioOutputStream.
- virtual bool Open() OVERRIDE;
- virtual void Close() OVERRIDE;
- virtual void Start(AudioSourceCallback* callback) OVERRIDE;
- virtual void Stop() OVERRIDE;
-
- virtual void SetVolume(double volume) OVERRIDE;
- virtual void GetVolume(double* volume) OVERRIDE;
-
- OSStatus SetInputDeviceAsCurrent(AudioDeviceID input_id);
- OSStatus SetOutputDeviceAsCurrent(AudioDeviceID output_id);
- AudioDeviceID GetInputDeviceID() { return input_info_.id_; }
- AudioDeviceID GetOutputDeviceID() { return output_info_.id_; }
-
- bool IsRunning();
-
- private:
- // Initialization.
- OSStatus CreateAudioUnits();
- OSStatus SetupInput(AudioDeviceID input_id);
- OSStatus EnableIO();
- OSStatus SetupOutput(AudioDeviceID output_id);
- OSStatus SetupCallbacks();
- OSStatus SetupStreamFormats();
- void AllocateInputData();
-
- // Handlers for the AudioUnit callbacks.
- OSStatus HandleInputCallback(AudioUnitRenderActionFlags* io_action_flags,
- const AudioTimeStamp* time_stamp,
- UInt32 bus_number,
- UInt32 number_of_frames,
- AudioBufferList* io_data);
-
- OSStatus HandleVarispeedCallback(AudioUnitRenderActionFlags* io_action_flags,
- const AudioTimeStamp* time_stamp,
- UInt32 bus_number,
- UInt32 number_of_frames,
- AudioBufferList* io_data);
-
- OSStatus HandleOutputCallback(AudioUnitRenderActionFlags* io_action_flags,
- const AudioTimeStamp* time_stamp,
- UInt32 bus_number,
- UInt32 number_of_frames,
- AudioBufferList* io_data);
-
- // AudioUnit callbacks.
- static OSStatus InputProc(void* user_data,
- AudioUnitRenderActionFlags* io_action_flags,
- const AudioTimeStamp* time_stamp,
- UInt32 bus_number,
- UInt32 number_of_frames,
- AudioBufferList* io_data);
-
- static OSStatus VarispeedProc(void* user_data,
- AudioUnitRenderActionFlags* io_action_flags,
- const AudioTimeStamp* time_stamp,
- UInt32 bus_number,
- UInt32 number_of_frames,
- AudioBufferList* io_data);
-
- static OSStatus OutputProc(void* user_data,
- AudioUnitRenderActionFlags* io_action_flags,
- const AudioTimeStamp* time_stamp,
- UInt32 bus_number,
- UInt32 number_of_frames,
- AudioBufferList* io_data);
-
- // Our creator.
- AudioManagerMac* manager_;
-
- // Client parameters.
- AudioParameters params_;
-
- double input_sample_rate_;
- double output_sample_rate_;
-
- // Pointer to the object that will provide the audio samples.
- AudioSourceCallback* source_;
-
- // Values used in Open().
- AudioDeviceID input_id_;
- AudioDeviceID output_id_;
-
- // The input AudioUnit renders its data here.
- AudioBufferList* input_buffer_list_;
-
- // Holds the actual data for |input_buffer_list_|.
- scoped_ptr<AudioBus> input_bus_;
-
- // Used to overlay AudioBufferLists.
- scoped_ptr<AudioBus> wrapper_bus_;
-
- class AudioDeviceInfo {
- public:
- AudioDeviceInfo()
- : id_(kAudioDeviceUnknown),
- is_input_(false),
- buffer_size_frames_(0) {}
-    void Initialize(AudioDeviceID id, bool is_input);
- bool IsInitialized() const { return id_ != kAudioDeviceUnknown; }
-
- AudioDeviceID id_;
- bool is_input_;
- UInt32 buffer_size_frames_;
- };
-
- AudioDeviceInfo input_info_;
- AudioDeviceInfo output_info_;
-
- // Used for input to output buffering.
- AudioFifo fifo_;
-
- // The optimal number of frames we'd like to keep in the FIFO at all times.
- int target_fifo_frames_;
-
- // A running average of the measured delta between actual number of frames
- // in the FIFO versus |target_fifo_frames_|.
- double average_delta_;
-
- // A varispeed rate scalar which is calculated based on FIFO drift.
- double fifo_rate_compensation_;
-
- // AudioUnits.
- AudioUnit input_unit_;
- AudioUnit varispeed_unit_;
- AudioUnit output_unit_;
-
- double first_input_time_;
-
- bool is_running_;
- int hardware_buffer_size_;
- int channels_;
-
- DISALLOW_COPY_AND_ASSIGN(AudioSynchronizedStream);
-};
-
-} // namespace media
-
-#endif // MEDIA_AUDIO_MAC_AUDIO_SYNCHRONIZED_MAC_H_
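
For reference, the AudioOutputStream contract this header implements is driven in the usual Open/Start/Stop/Close order; a hedged usage sketch (the helper function and its caller are illustrative only):

// Sketch: typical lifecycle of the stream declared above.
#include "media/audio/mac/audio_synchronized_mac.h"

void RunSynchronizedPlayback(
    media::AudioSynchronizedStream* stream,
    media::AudioOutputStream::AudioSourceCallback* source) {
  if (!stream->Open())
    return;               // Device setup failed; nothing to tear down.
  stream->Start(source);  // Input/varispeed/output callbacks begin firing.
  // ... playback runs until the caller decides to stop ...
  stream->Stop();
  stream->Close();        // Per AudioOutputStream, Close() may delete |stream|.
}
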
diff --git a/src/media/audio/mac/audio_unified_mac.cc b/src/media/audio/mac/audio_unified_mac.cc
deleted file mode 100644
index f8622e4..0000000
--- a/src/media/audio/mac/audio_unified_mac.cc
+++ /dev/null
@@ -1,398 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "media/audio/mac/audio_unified_mac.h"
-
-#include <CoreServices/CoreServices.h>
-
-#include "base/basictypes.h"
-#include "base/logging.h"
-#include "base/mac/mac_logging.h"
-#include "media/audio/audio_util.h"
-#include "media/audio/mac/audio_manager_mac.h"
-
-namespace media {
-
-// TODO(crogers): support more than hard-coded stereo input.
-// Ideally we would like to receive this value as a constructor argument.
-static const int kDefaultInputChannels = 2;
-
-AudioHardwareUnifiedStream::AudioHardwareUnifiedStream(
- AudioManagerMac* manager, const AudioParameters& params)
- : manager_(manager),
- source_(NULL),
- client_input_channels_(kDefaultInputChannels),
- volume_(1.0f),
- input_channels_(0),
- output_channels_(0),
- input_channels_per_frame_(0),
- output_channels_per_frame_(0),
- io_proc_id_(0),
- device_(kAudioObjectUnknown),
- is_playing_(false) {
- DCHECK(manager_);
-
- // A frame is one sample across all channels. In interleaved audio the per
- // frame fields identify the set of n |channels|. In uncompressed audio, a
- // packet is always one frame.
- format_.mSampleRate = params.sample_rate();
- format_.mFormatID = kAudioFormatLinearPCM;
- format_.mFormatFlags = kLinearPCMFormatFlagIsPacked |
- kLinearPCMFormatFlagIsSignedInteger;
- format_.mBitsPerChannel = params.bits_per_sample();
- format_.mChannelsPerFrame = params.channels();
- format_.mFramesPerPacket = 1;
- format_.mBytesPerPacket = (format_.mBitsPerChannel * params.channels()) / 8;
- format_.mBytesPerFrame = format_.mBytesPerPacket;
- format_.mReserved = 0;
-
- // Calculate the number of sample frames per callback.
- number_of_frames_ = params.GetBytesPerBuffer() / format_.mBytesPerPacket;
-
- input_bus_ = AudioBus::Create(client_input_channels_,
- params.frames_per_buffer());
- output_bus_ = AudioBus::Create(params);
-}
-
-AudioHardwareUnifiedStream::~AudioHardwareUnifiedStream() {
- DCHECK_EQ(device_, kAudioObjectUnknown);
-}
-
-bool AudioHardwareUnifiedStream::Open() {
- // Obtain the current output device selected by the user.
- AudioObjectPropertyAddress pa;
- pa.mSelector = kAudioHardwarePropertyDefaultOutputDevice;
- pa.mScope = kAudioObjectPropertyScopeGlobal;
- pa.mElement = kAudioObjectPropertyElementMaster;
-
- UInt32 size = sizeof(device_);
-
- OSStatus result = AudioObjectGetPropertyData(
- kAudioObjectSystemObject,
- &pa,
- 0,
- 0,
- &size,
- &device_);
-
- if ((result != kAudioHardwareNoError) || (device_ == kAudioDeviceUnknown)) {
- LOG(ERROR) << "Cannot open unified AudioDevice.";
- return false;
- }
-
- // The requested sample-rate must match the hardware sample-rate.
- Float64 sample_rate = 0.0;
- size = sizeof(sample_rate);
-
- pa.mSelector = kAudioDevicePropertyNominalSampleRate;
- pa.mScope = kAudioObjectPropertyScopeWildcard;
- pa.mElement = kAudioObjectPropertyElementMaster;
-
- result = AudioObjectGetPropertyData(
- device_,
- &pa,
- 0,
- 0,
- &size,
- &sample_rate);
-
- if (result != noErr || sample_rate != format_.mSampleRate) {
- LOG(ERROR) << "Requested sample-rate: " << format_.mSampleRate
- << " must match the hardware sample-rate: " << sample_rate;
- return false;
- }
-
- // Configure buffer frame size.
- UInt32 frame_size = number_of_frames_;
-
- pa.mSelector = kAudioDevicePropertyBufferFrameSize;
- pa.mScope = kAudioDevicePropertyScopeInput;
- pa.mElement = kAudioObjectPropertyElementMaster;
- result = AudioObjectSetPropertyData(
- device_,
- &pa,
- 0,
- 0,
- sizeof(frame_size),
- &frame_size);
-
- if (result != noErr) {
- LOG(ERROR) << "Unable to set input buffer frame size: " << frame_size;
- return false;
- }
-
- pa.mScope = kAudioDevicePropertyScopeOutput;
- result = AudioObjectSetPropertyData(
- device_,
- &pa,
- 0,
- 0,
- sizeof(frame_size),
- &frame_size);
-
- if (result != noErr) {
- LOG(ERROR) << "Unable to set output buffer frame size: " << frame_size;
- return false;
- }
-
- DVLOG(1) << "Sample rate: " << sample_rate;
- DVLOG(1) << "Frame size: " << frame_size;
-
- // Determine the number of input and output channels.
- // We handle both the interleaved and non-interleaved cases.
-
- // Get input stream configuration.
- pa.mSelector = kAudioDevicePropertyStreamConfiguration;
- pa.mScope = kAudioDevicePropertyScopeInput;
- pa.mElement = kAudioObjectPropertyElementMaster;
-
- result = AudioObjectGetPropertyDataSize(device_, &pa, 0, 0, &size);
- OSSTATUS_DCHECK(result == noErr, result);
-
- if (result == noErr && size > 0) {
- // Allocate storage.
- scoped_array<uint8> input_list_storage(new uint8[size]);
- AudioBufferList& input_list =
- *reinterpret_cast<AudioBufferList*>(input_list_storage.get());
-
- result = AudioObjectGetPropertyData(
- device_,
- &pa,
- 0,
- 0,
- &size,
- &input_list);
- OSSTATUS_DCHECK(result == noErr, result);
-
- if (result == noErr) {
- // Determine number of input channels.
- input_channels_per_frame_ = input_list.mNumberBuffers > 0 ?
- input_list.mBuffers[0].mNumberChannels : 0;
- if (input_channels_per_frame_ == 1 && input_list.mNumberBuffers > 1) {
- // Non-interleaved.
- input_channels_ = input_list.mNumberBuffers;
- } else {
- // Interleaved.
- input_channels_ = input_channels_per_frame_;
- }
- }
- }
-
- DVLOG(1) << "Input channels: " << input_channels_;
- DVLOG(1) << "Input channels per frame: " << input_channels_per_frame_;
-
- // The hardware must have at least the requested input channels.
- if (result != noErr || client_input_channels_ > input_channels_) {
- LOG(ERROR) << "AudioDevice does not support requested input channels.";
- return false;
- }
-
- // Get output stream configuration.
- pa.mSelector = kAudioDevicePropertyStreamConfiguration;
- pa.mScope = kAudioDevicePropertyScopeOutput;
- pa.mElement = kAudioObjectPropertyElementMaster;
-
- result = AudioObjectGetPropertyDataSize(device_, &pa, 0, 0, &size);
- OSSTATUS_DCHECK(result == noErr, result);
-
- if (result == noErr && size > 0) {
- // Allocate storage.
- scoped_array<uint8> output_list_storage(new uint8[size]);
- AudioBufferList& output_list =
- *reinterpret_cast<AudioBufferList*>(output_list_storage.get());
-
- result = AudioObjectGetPropertyData(
- device_,
- &pa,
- 0,
- 0,
- &size,
- &output_list);
- OSSTATUS_DCHECK(result == noErr, result);
-
- if (result == noErr) {
- // Determine number of output channels.
- output_channels_per_frame_ = output_list.mBuffers[0].mNumberChannels;
- if (output_channels_per_frame_ == 1 && output_list.mNumberBuffers > 1) {
- // Non-interleaved.
- output_channels_ = output_list.mNumberBuffers;
- } else {
- // Interleaved.
- output_channels_ = output_channels_per_frame_;
- }
- }
- }
-
- DVLOG(1) << "Output channels: " << output_channels_;
- DVLOG(1) << "Output channels per frame: " << output_channels_per_frame_;
-
- // The hardware must have at least the requested output channels.
- if (result != noErr ||
- output_channels_ < static_cast<int>(format_.mChannelsPerFrame)) {
- LOG(ERROR) << "AudioDevice does not support requested output channels.";
- return false;
- }
-
- // Setup the I/O proc.
- result = AudioDeviceCreateIOProcID(device_, RenderProc, this, &io_proc_id_);
- if (result != noErr) {
- LOG(ERROR) << "Error creating IOProc.";
- return false;
- }
-
- return true;
-}
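
Open() counts channels by fetching kAudioDevicePropertyStreamConfiguration and inspecting whether the device reports one interleaved buffer or several single-channel buffers. The same two-step size/data query, reduced to a hedged standalone sketch:

// Sketch: count a device's output channels from its stream configuration.
#include <CoreAudio/CoreAudio.h>
#include <vector>

bool CountOutputChannels(AudioDeviceID device, UInt32* channels) {
  AudioObjectPropertyAddress pa = {kAudioDevicePropertyStreamConfiguration,
                                   kAudioDevicePropertyScopeOutput,
                                   kAudioObjectPropertyElementMaster};

  UInt32 size = 0;
  if (AudioObjectGetPropertyDataSize(device, &pa, 0, NULL, &size) != noErr ||
      size == 0)
    return false;

  std::vector<UInt8> storage(size);
  AudioBufferList* list = reinterpret_cast<AudioBufferList*>(&storage[0]);
  if (AudioObjectGetPropertyData(device, &pa, 0, NULL, &size, list) != noErr)
    return false;

  // Interleaved devices report one buffer with N channels; non-interleaved
  // devices report N single-channel buffers. Summing covers both cases.
  UInt32 total = 0;
  for (UInt32 i = 0; i < list->mNumberBuffers; ++i)
    total += list->mBuffers[i].mNumberChannels;
  *channels = total;
  return true;
}
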
-
-void AudioHardwareUnifiedStream::Close() {
- DCHECK(!is_playing_);
-
- OSStatus result = AudioDeviceDestroyIOProcID(device_, io_proc_id_);
- OSSTATUS_DCHECK(result == noErr, result);
-
- io_proc_id_ = 0;
- device_ = kAudioObjectUnknown;
-
- // Inform the audio manager that we have been closed. This can cause our
- // destruction.
- manager_->ReleaseOutputStream(this);
-}
-
-void AudioHardwareUnifiedStream::Start(AudioSourceCallback* callback) {
- DCHECK(callback);
- DCHECK_NE(device_, kAudioObjectUnknown);
- DCHECK(!is_playing_);
- if (device_ == kAudioObjectUnknown || is_playing_)
- return;
-
- source_ = callback;
-
- OSStatus result = AudioDeviceStart(device_, io_proc_id_);
- OSSTATUS_DCHECK(result == noErr, result);
-
- if (result == noErr)
- is_playing_ = true;
-}
-
-void AudioHardwareUnifiedStream::Stop() {
- if (!is_playing_)
- return;
-
- if (device_ != kAudioObjectUnknown) {
- OSStatus result = AudioDeviceStop(device_, io_proc_id_);
- OSSTATUS_DCHECK(result == noErr, result);
- }
-
- is_playing_ = false;
- source_ = NULL;
-}
-
-void AudioHardwareUnifiedStream::SetVolume(double volume) {
- volume_ = static_cast<float>(volume);
- // TODO(crogers): set volume property
-}
-
-void AudioHardwareUnifiedStream::GetVolume(double* volume) {
- *volume = volume_;
-}
-
-// Pulls on our provider with optional input, asking it to render output.
-// Note to future hackers of this function: Do not add locks here because this
-// is running on a real-time thread (for low-latency).
-OSStatus AudioHardwareUnifiedStream::Render(
- AudioDeviceID device,
- const AudioTimeStamp* now,
- const AudioBufferList* input_data,
- const AudioTimeStamp* input_time,
- AudioBufferList* output_data,
- const AudioTimeStamp* output_time) {
- // Convert the input data accounting for possible interleaving.
- // TODO(crogers): it's better to simply memcpy() if source is already planar.
- if (input_channels_ >= client_input_channels_) {
- for (int channel_index = 0; channel_index < client_input_channels_;
- ++channel_index) {
- float* source;
-
- int source_channel_index = channel_index;
-
- if (input_channels_per_frame_ > 1) {
- // Interleaved.
- source = static_cast<float*>(input_data->mBuffers[0].mData) +
- source_channel_index;
- } else {
- // Non-interleaved.
- source = static_cast<float*>(
- input_data->mBuffers[source_channel_index].mData);
- }
-
- float* p = input_bus_->channel(channel_index);
- for (int i = 0; i < number_of_frames_; ++i) {
- p[i] = *source;
- source += input_channels_per_frame_;
- }
- }
- } else if (input_channels_) {
- input_bus_->Zero();
- }
-
- // Give the client optional input data and have it render the output data.
- source_->OnMoreIOData(input_bus_.get(),
- output_bus_.get(),
- AudioBuffersState(0, 0));
-
- // TODO(crogers): handle final Core Audio 5.1 layout for 5.1 audio.
-
- // Handle interleaving as necessary.
- // TODO(crogers): it's better to simply memcpy() if dest is already planar.
-
- for (int channel_index = 0;
- channel_index < static_cast<int>(format_.mChannelsPerFrame);
- ++channel_index) {
- float* dest;
-
- int dest_channel_index = channel_index;
-
- if (output_channels_per_frame_ > 1) {
- // Interleaved.
- dest = static_cast<float*>(output_data->mBuffers[0].mData) +
- dest_channel_index;
- } else {
- // Non-interleaved.
- dest = static_cast<float*>(
- output_data->mBuffers[dest_channel_index].mData);
- }
-
- float* p = output_bus_->channel(channel_index);
- for (int i = 0; i < number_of_frames_; ++i) {
- *dest = p[i];
- dest += output_channels_per_frame_;
- }
- }
-
- return noErr;
-}
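
Render() above shuttles samples between the device's (possibly interleaved) buffers and the planar AudioBus handed to OnMoreIOData(). The de-interleave step on its own, as a small sketch:

// Sketch: copy one channel out of an interleaved float buffer into planar
// storage, mirroring the input conversion loop in Render().
void ExtractChannel(const float* interleaved, int channels_per_frame,
                    int channel, float* planar_out, int frames) {
  const float* src = interleaved + channel;  // First sample of this channel.
  for (int i = 0; i < frames; ++i) {
    planar_out[i] = *src;
    src += channels_per_frame;  // Advance one full frame.
  }
}

The re-interleave step at the end of Render() is the same loop with source and destination swapped.
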
-
-OSStatus AudioHardwareUnifiedStream::RenderProc(
- AudioDeviceID device,
- const AudioTimeStamp* now,
- const AudioBufferList* input_data,
- const AudioTimeStamp* input_time,
- AudioBufferList* output_data,
- const AudioTimeStamp* output_time,
- void* user_data) {
- AudioHardwareUnifiedStream* audio_output =
- static_cast<AudioHardwareUnifiedStream*>(user_data);
- DCHECK(audio_output);
- if (!audio_output)
- return -1;
-
- return audio_output->Render(
- device,
- now,
- input_data,
- input_time,
- output_data,
- output_time);
-}
-
-} // namespace media
diff --git a/src/media/audio/mac/audio_unified_mac.h b/src/media/audio/mac/audio_unified_mac.h
deleted file mode 100644
index ff090e3..0000000
--- a/src/media/audio/mac/audio_unified_mac.h
+++ /dev/null
@@ -1,100 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef MEDIA_AUDIO_MAC_AUDIO_UNIFIED_MAC_H_
-#define MEDIA_AUDIO_MAC_AUDIO_UNIFIED_MAC_H_
-
-#include <CoreAudio/CoreAudio.h>
-
-#include "base/memory/scoped_ptr.h"
-#include "media/audio/audio_io.h"
-#include "media/audio/audio_parameters.h"
-
-namespace media {
-
-class AudioManagerMac;
-
-// Implementation of AudioOutputStream for Mac OS X using the
-// CoreAudio AudioHardware API suitable for low-latency unified audio I/O
-// when using devices which support *both* input and output
-// in the same driver. This is the case with professional
-// USB and Firewire devices.
-//
-// Please note that it's required to first get the native sample-rate of the
-// default output device and use that sample-rate when creating this object.
-class AudioHardwareUnifiedStream : public AudioOutputStream {
- public:
-  // The ctor takes all the usual parameters, plus |manager|, which is the
-  // audio manager that is creating this object.
- AudioHardwareUnifiedStream(AudioManagerMac* manager,
- const AudioParameters& params);
- // The dtor is typically called by the AudioManager only and it is usually
- // triggered by calling AudioOutputStream::Close().
- virtual ~AudioHardwareUnifiedStream();
-
- // Implementation of AudioOutputStream.
- virtual bool Open() OVERRIDE;
- virtual void Close() OVERRIDE;
- virtual void Start(AudioSourceCallback* callback) OVERRIDE;
- virtual void Stop() OVERRIDE;
- virtual void SetVolume(double volume) OVERRIDE;
- virtual void GetVolume(double* volume) OVERRIDE;
-
- int input_channels() const { return input_channels_; }
- int output_channels() const { return output_channels_; }
-
- private:
- OSStatus Render(AudioDeviceID device,
- const AudioTimeStamp* now,
- const AudioBufferList* input_data,
- const AudioTimeStamp* input_time,
- AudioBufferList* output_data,
- const AudioTimeStamp* output_time);
-
- static OSStatus RenderProc(AudioDeviceID device,
- const AudioTimeStamp* now,
- const AudioBufferList* input_data,
- const AudioTimeStamp* input_time,
- AudioBufferList* output_data,
- const AudioTimeStamp* output_time,
- void* user_data);
-
-  // Our creator. The audio manager needs to be notified when we close.
- AudioManagerMac* manager_;
-
- // Pointer to the object that will provide the audio samples.
- AudioSourceCallback* source_;
-
- // Structure that holds the stream format details such as bitrate.
- AudioStreamBasicDescription format_;
-
- // Hardware buffer size.
- int number_of_frames_;
-
- // Number of audio channels provided to the client via OnMoreIOData().
- int client_input_channels_;
-
- // Volume level from 0 to 1.
- float volume_;
-
- // Number of input and output channels queried from the hardware.
- int input_channels_;
- int output_channels_;
- int input_channels_per_frame_;
- int output_channels_per_frame_;
-
- AudioDeviceIOProcID io_proc_id_;
- AudioDeviceID device_;
- bool is_playing_;
-
- // Intermediate buffers used with call to OnMoreIOData().
- scoped_ptr<AudioBus> input_bus_;
- scoped_ptr<AudioBus> output_bus_;
-
- DISALLOW_COPY_AND_ASSIGN(AudioHardwareUnifiedStream);
-};
-
-} // namespace media
-
-#endif // MEDIA_AUDIO_MAC_AUDIO_UNIFIED_MAC_H_
diff --git a/src/media/audio/mock_audio_manager.cc b/src/media/audio/mock_audio_manager.cc
deleted file mode 100644
index 9a40b65..0000000
--- a/src/media/audio/mock_audio_manager.cc
+++ /dev/null
@@ -1,77 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "media/audio/mock_audio_manager.h"
-
-#include "base/logging.h"
-#include "base/message_loop_proxy.h"
-
-namespace media {
-
-MockAudioManager::MockAudioManager(base::MessageLoopProxy* message_loop_proxy)
- : message_loop_proxy_(message_loop_proxy) {
-}
-
-MockAudioManager::~MockAudioManager() {
-}
-
-bool MockAudioManager::HasAudioOutputDevices() {
- return true;
-}
-
-bool MockAudioManager::HasAudioInputDevices() {
- return true;
-}
-
-string16 MockAudioManager::GetAudioInputDeviceModel() {
- return string16();
-}
-
-bool MockAudioManager::CanShowAudioInputSettings() {
- return false;
-}
-
-void MockAudioManager::ShowAudioInputSettings() {
-}
-
-void MockAudioManager::GetAudioInputDeviceNames(
- media::AudioDeviceNames* device_names) {
-}
-
-media::AudioOutputStream* MockAudioManager::MakeAudioOutputStream(
- const media::AudioParameters& params) {
- NOTREACHED();
- return NULL;
-}
-
-media::AudioOutputStream* MockAudioManager::MakeAudioOutputStreamProxy(
- const media::AudioParameters& params) {
- NOTREACHED();
- return NULL;
-}
-
-media::AudioInputStream* MockAudioManager::MakeAudioInputStream(
- const media::AudioParameters& params,
- const std::string& device_id) {
- NOTREACHED();
- return NULL;
-}
-
-bool MockAudioManager::IsRecordingInProcess() {
- return false;
-}
-
-scoped_refptr<base::MessageLoopProxy> MockAudioManager::GetMessageLoop() {
- return message_loop_proxy_;
-}
-
-void MockAudioManager::AddOutputDeviceChangeListener(
- AudioDeviceListener* listener) {
-}
-
-void MockAudioManager::RemoveOutputDeviceChangeListener(
- AudioDeviceListener* listener) {
-}
-
-} // namespace media.
diff --git a/src/media/audio/mock_audio_manager.h b/src/media/audio/mock_audio_manager.h
deleted file mode 100644
index 6a94055..0000000
--- a/src/media/audio/mock_audio_manager.h
+++ /dev/null
@@ -1,68 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef MEDIA_AUDIO_MOCK_AUDIO_MANAGER_H_
-#define MEDIA_AUDIO_MOCK_AUDIO_MANAGER_H_
-
-#include "media/audio/audio_manager.h"
-
-namespace media {
-
-// This class is a simple mock of AudioManager, used exclusively in tests. It
-// serves the following purposes:
-// 1) Avoids using the actual (system- and platform-dependent) AudioManager.
-// Some bots do not have input devices, so using the actual AudioManager
-// would cause failures in classes that expect them.
-// 2) Allows the mock audio events to be dispatched on an arbitrary thread,
-// rather than forcing them on the audio thread, easing their handling in
-// browser tests (Note: sharing a thread can cause deadlocks on production
-// classes if WaitableEvents or any other form of lock is used for
-// synchronization purposes).
-class MockAudioManager : public media::AudioManager {
- public:
- explicit MockAudioManager(base::MessageLoopProxy* message_loop_proxy);
-
- virtual bool HasAudioOutputDevices() OVERRIDE;
-
- virtual bool HasAudioInputDevices() OVERRIDE;
-
- virtual string16 GetAudioInputDeviceModel() OVERRIDE;
-
- virtual bool CanShowAudioInputSettings() OVERRIDE;
-
- virtual void ShowAudioInputSettings() OVERRIDE;
-
- virtual void GetAudioInputDeviceNames(
- media::AudioDeviceNames* device_names) OVERRIDE;
-
- virtual media::AudioOutputStream* MakeAudioOutputStream(
- const media::AudioParameters& params) OVERRIDE;
-
- virtual media::AudioOutputStream* MakeAudioOutputStreamProxy(
- const media::AudioParameters& params) OVERRIDE;
-
- virtual media::AudioInputStream* MakeAudioInputStream(
- const media::AudioParameters& params,
- const std::string& device_id) OVERRIDE;
-
- virtual bool IsRecordingInProcess() OVERRIDE;
-
- virtual scoped_refptr<base::MessageLoopProxy> GetMessageLoop() OVERRIDE;
-
- virtual void AddOutputDeviceChangeListener(
- AudioDeviceListener* listener) OVERRIDE;
- virtual void RemoveOutputDeviceChangeListener(
- AudioDeviceListener* listener) OVERRIDE;
-
- private:
- virtual ~MockAudioManager();
-
- scoped_refptr<base::MessageLoopProxy> message_loop_proxy_;
-
- DISALLOW_COPY_AND_ASSIGN(MockAudioManager);
-};
-
-} // namespace media.
-
-#endif // MEDIA_AUDIO_MOCK_AUDIO_MANAGER_H_
diff --git a/src/media/audio/mock_shell_audio_streamer.h b/src/media/audio/mock_shell_audio_streamer.h
deleted file mode 100644
index b6f2325..0000000
--- a/src/media/audio/mock_shell_audio_streamer.h
+++ /dev/null
@@ -1,54 +0,0 @@
-/*
- * Copyright 2013 Google Inc. All Rights Reserved.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef MEDIA_AUDIO_MOCK_SHELL_AUDIO_STREAMER_H_
-#define MEDIA_AUDIO_MOCK_SHELL_AUDIO_STREAMER_H_
-
-#include "media/audio/shell_audio_streamer.h"
-#include "testing/gmock/include/gmock/gmock.h"
-
-namespace media {
-
-class MockShellAudioStream : public ShellAudioStream {
- public:
- MockShellAudioStream() {}
- MOCK_CONST_METHOD0(PauseRequested, bool ());
- MOCK_METHOD2(PullFrames, bool (uint32_t*, uint32_t*));
- MOCK_METHOD1(ConsumeFrames, void (uint32_t));
- MOCK_CONST_METHOD0(GetAudioParameters, const AudioParameters& ());
- MOCK_METHOD0(GetAudioBus, AudioBus* ());
-
- private:
- DISALLOW_COPY_AND_ASSIGN(MockShellAudioStream);
-};
-
-class MockShellAudioStreamer : public ShellAudioStreamer {
- public:
- MockShellAudioStreamer() {}
- MOCK_CONST_METHOD0(GetConfig, Config ());
- MOCK_METHOD1(AddStream, bool (ShellAudioStream*));
- MOCK_METHOD1(RemoveStream, void (ShellAudioStream*));
- MOCK_CONST_METHOD1(HasStream, bool (ShellAudioStream*));
- MOCK_METHOD2(SetVolume, bool (ShellAudioStream*, double));
-
- private:
- DISALLOW_COPY_AND_ASSIGN(MockShellAudioStreamer);
-};
-
-} // namespace media
-
-#endif // MEDIA_AUDIO_MOCK_SHELL_AUDIO_STREAMER_H_
-
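
These mocks are plain gmock classes; a test typically sets expectations on them and exercises code that talks to ShellAudioStreamer. A hedged usage sketch (the test name and expectations are illustrative only):

// Sketch: exercising the mocks above in a gmock/gtest test.
#include "media/audio/mock_shell_audio_streamer.h"
#include "testing/gmock/include/gmock/gmock.h"
#include "testing/gtest/include/gtest/gtest.h"

using ::testing::Return;

TEST(ShellAudioStreamerTest, AddsAndRemovesStream) {
  media::MockShellAudioStreamer streamer;
  media::MockShellAudioStream stream;

  EXPECT_CALL(streamer, AddStream(&stream)).WillOnce(Return(true));
  EXPECT_CALL(streamer, HasStream(&stream)).WillOnce(Return(true));
  EXPECT_CALL(streamer, RemoveStream(&stream));

  ASSERT_TRUE(streamer.AddStream(&stream));
  EXPECT_TRUE(streamer.HasStream(&stream));
  streamer.RemoveStream(&stream);
}
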
diff --git a/src/media/audio/null_audio_sink.cc b/src/media/audio/null_audio_sink.cc
deleted file mode 100644
index c93ceb1..0000000
--- a/src/media/audio/null_audio_sink.cc
+++ /dev/null
@@ -1,144 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "media/audio/null_audio_sink.h"
-
-#include "base/bind.h"
-#include "base/stringprintf.h"
-#include "base/sys_byteorder.h"
-#include "base/threading/platform_thread.h"
-
-namespace media {
-
-NullAudioSink::NullAudioSink()
- : initialized_(false),
- playing_(false),
- callback_(NULL),
- thread_("NullAudioThread"),
- hash_audio_for_testing_(false) {
-}
-
-void NullAudioSink::Initialize(const AudioParameters& params,
- RenderCallback* callback) {
- DCHECK(!initialized_);
- params_ = params;
-
- audio_bus_ = AudioBus::Create(params_);
-
- if (hash_audio_for_testing_) {
- md5_channel_contexts_.reset(new base::MD5Context[params_.channels()]);
- for (int i = 0; i < params_.channels(); i++)
- base::MD5Init(&md5_channel_contexts_[i]);
- }
-
- callback_ = callback;
- initialized_ = true;
-}
-
-void NullAudioSink::Start() {
- if (!thread_.Start())
- return;
-
- thread_.message_loop()->PostTask(FROM_HERE, base::Bind(
- &NullAudioSink::FillBufferTask, this));
-}
-
-void NullAudioSink::Stop() {
- SetPlaying(false);
- thread_.Stop();
-}
-
-void NullAudioSink::Play() {
- SetPlaying(true);
-}
-
-void NullAudioSink::Pause(bool /* flush */) {
- SetPlaying(false);
-}
-
-bool NullAudioSink::SetVolume(double volume) {
- // Audio is always muted.
- return volume == 0.0;
-}
-
-void NullAudioSink::SetPlaying(bool is_playing) {
- base::AutoLock auto_lock(lock_);
- playing_ = is_playing;
-}
-
-NullAudioSink::~NullAudioSink() {
- DCHECK(!thread_.IsRunning());
-}
-
-void NullAudioSink::FillBufferTask() {
- base::AutoLock auto_lock(lock_);
-
- base::TimeDelta delay;
- // Only consume buffers when actually playing.
- if (playing_) {
- int frames_received = callback_->Render(audio_bus_.get(), 0);
- int frames_per_millisecond =
- params_.sample_rate() / base::Time::kMillisecondsPerSecond;
-
- if (hash_audio_for_testing_ && frames_received > 0) {
- DCHECK_EQ(sizeof(float), sizeof(uint32));
- int channels = audio_bus_->channels();
- for (int channel_idx = 0; channel_idx < channels; ++channel_idx) {
- float* channel = audio_bus_->channel(channel_idx);
- for (int frame_idx = 0; frame_idx < frames_received; frame_idx++) {
- // Convert float to uint32 w/o conversion loss.
- uint32 frame = base::ByteSwapToLE32(
- bit_cast<uint32>(channel[frame_idx]));
- base::MD5Update(
- &md5_channel_contexts_[channel_idx], base::StringPiece(
- reinterpret_cast<char*>(&frame), sizeof(frame)));
- }
- }
- }
-
- // Calculate our sleep duration.
- delay = base::TimeDelta::FromMilliseconds(
- frames_received / frames_per_millisecond);
- } else {
- // If paused, sleep for 10 milliseconds before polling again.
- delay = base::TimeDelta::FromMilliseconds(10);
- }
-
- // Sleep for at least one millisecond so we don't spin the CPU.
- MessageLoop::current()->PostDelayedTask(
- FROM_HERE,
- base::Bind(&NullAudioSink::FillBufferTask, this),
- std::max(delay, base::TimeDelta::FromMilliseconds(1)));
-}
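
FillBufferTask() paces itself by converting the rendered frame count back into wall-clock time (frames divided by frames-per-millisecond), so the sink consumes data at roughly real-time speed even though it discards it. That arithmetic in isolation, as a sketch:

// Sketch of the pacing arithmetic above: how long |frames| of audio lasts,
// clamped to at least 1 ms so the polling loop never busy-spins.
#include <algorithm>
#include "base/time.h"

base::TimeDelta FramesToPlaybackDelay(int frames, int sample_rate) {
  const int frames_per_millisecond =
      sample_rate / base::Time::kMillisecondsPerSecond;
  return std::max(
      base::TimeDelta::FromMilliseconds(frames / frames_per_millisecond),
      base::TimeDelta::FromMilliseconds(1));
}
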
-
-void NullAudioSink::StartAudioHashForTesting() {
- DCHECK(!initialized_);
- hash_audio_for_testing_ = true;
-}
-
-std::string NullAudioSink::GetAudioHashForTesting() {
- DCHECK(hash_audio_for_testing_);
-
- // If initialize failed or was never called, ensure we return an empty hash.
- int channels = 1;
- if (!initialized_) {
- md5_channel_contexts_.reset(new base::MD5Context[1]);
- base::MD5Init(&md5_channel_contexts_[0]);
- } else {
- channels = audio_bus_->channels();
- }
-
- // Hash all channels into the first channel.
- base::MD5Digest digest;
- for (int i = 1; i < channels; i++) {
- base::MD5Final(&digest, &md5_channel_contexts_[i]);
- base::MD5Update(&md5_channel_contexts_[0], base::StringPiece(
- reinterpret_cast<char*>(&digest), sizeof(base::MD5Digest)));
- }
-
- base::MD5Final(&digest, &md5_channel_contexts_[0]);
- return base::MD5DigestToBase16(digest);
-}
-
-} // namespace media
diff --git a/src/media/audio/null_audio_sink.h b/src/media/audio/null_audio_sink.h
deleted file mode 100644
index 7d73f93..0000000
--- a/src/media/audio/null_audio_sink.h
+++ /dev/null
@@ -1,77 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef MEDIA_FILTERS_NULL_AUDIO_RENDERER_H_
-#define MEDIA_FILTERS_NULL_AUDIO_RENDERER_H_
-
-// NullAudioSink effectively uses an extra thread to "throw away" the
-// audio data at a rate resembling normal playback speed. It's just like
-// decoding to /dev/null!
-//
-// NullAudioSink can also be used in situations where the client has no
-// audio device or we haven't written an audio implementation for a particular
-// platform yet.
-
-#include "base/md5.h"
-#include "base/memory/scoped_ptr.h"
-#include "base/threading/thread.h"
-#include "media/base/audio_renderer_sink.h"
-
-namespace media {
-class AudioBus;
-
-class MEDIA_EXPORT NullAudioSink
- : NON_EXPORTED_BASE(public AudioRendererSink) {
- public:
- NullAudioSink();
-
- // AudioRendererSink implementation.
- virtual void Initialize(const AudioParameters& params,
- RenderCallback* callback) OVERRIDE;
- virtual void Start() OVERRIDE;
- virtual void Stop() OVERRIDE;
- virtual void Pause(bool flush) OVERRIDE;
- virtual void Play() OVERRIDE;
- virtual bool SetVolume(double volume) OVERRIDE;
- virtual void ResumeAfterUnderflow(bool buffer_more_audio) OVERRIDE {}
-
- // Enables audio frame hashing and reinitializes the MD5 context. Must be
- // called prior to Initialize().
- void StartAudioHashForTesting();
-
- // Returns the MD5 hash of all audio frames seen since the last reset.
- std::string GetAudioHashForTesting();
-
- protected:
- virtual ~NullAudioSink();
-
- private:
- // Audio thread task that periodically calls FillBuffer() to consume
- // audio data.
- void FillBufferTask();
-
- void SetPlaying(bool is_playing);
-
- // A buffer passed to FillBuffer to advance playback.
- scoped_ptr<AudioBus> audio_bus_;
-
- AudioParameters params_;
- bool initialized_;
- bool playing_;
- RenderCallback* callback_;
-
- // Separate thread used to throw away data.
- base::Thread thread_;
- base::Lock lock_;
-
- // Controls whether or not a running MD5 hash is computed for audio frames.
- bool hash_audio_for_testing_;
- scoped_array<base::MD5Context> md5_channel_contexts_;
-
- DISALLOW_COPY_AND_ASSIGN(NullAudioSink);
-};
-
-} // namespace media
-
-#endif // MEDIA_FILTERS_NULL_AUDIO_RENDERER_H_
diff --git a/src/media/audio/null_audio_streamer.cc b/src/media/audio/null_audio_streamer.cc
deleted file mode 100644
index 0937b17..0000000
--- a/src/media/audio/null_audio_streamer.cc
+++ /dev/null
@@ -1,110 +0,0 @@
-/*
- * Copyright 2016 Google Inc. All Rights Reserved.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "media/audio/null_audio_streamer.h"
-
-#include <algorithm>
-
-#include "media/audio/audio_parameters.h"
-#include "media/mp4/aac.h"
-
-namespace media {
-
-NullAudioStreamer::NullAudioStreamer()
- : null_streamer_thread_("Null Audio Streamer") {
- null_streamer_thread_.Start();
- null_streamer_thread_.message_loop()->PostTask(
- FROM_HERE, base::Bind(&NullAudioStreamer::StartNullStreamer,
- base::Unretained(this)));
-}
-
-NullAudioStreamer::~NullAudioStreamer() {
- null_streamer_thread_.message_loop()->PostTask(
- FROM_HERE,
- base::Bind(&NullAudioStreamer::StopNullStreamer, base::Unretained(this)));
- null_streamer_thread_.Stop();
-}
-
-ShellAudioStreamer::Config NullAudioStreamer::GetConfig() const {
- // Reasonable looking settings.
- const uint32 initial_rebuffering_frames_per_channel =
- mp4::AAC::kFramesPerAccessUnit * 32;
- const uint32 sink_buffer_size_in_frames_per_channel =
- initial_rebuffering_frames_per_channel * 8;
- const uint32 max_hardware_channels = 2;
-
- return Config(Config::INTERLEAVED, initial_rebuffering_frames_per_channel,
- sink_buffer_size_in_frames_per_channel, max_hardware_channels,
- sizeof(float) /* bytes_per_sample */);
-}
-
-bool NullAudioStreamer::AddStream(ShellAudioStream* stream) {
- base::AutoLock auto_lock(streams_lock_);
- streams_.insert(std::make_pair(stream, NullAudioStream()));
- return true;
-}
-
-void NullAudioStreamer::RemoveStream(ShellAudioStream* stream) {
- base::AutoLock auto_lock(streams_lock_);
- DLOG(INFO) << "Remove";
- streams_.erase(stream);
-}
-
-bool NullAudioStreamer::HasStream(ShellAudioStream* stream) const {
- base::AutoLock auto_lock(streams_lock_);
- return streams_.find(stream) != streams_.end();
-}
-
-void NullAudioStreamer::StartNullStreamer() {
- last_run_time_ = base::Time::Now();
- advance_streams_timer_.emplace();
- advance_streams_timer_->Start(
- FROM_HERE, base::TimeDelta::FromMilliseconds(10),
- base::Bind(&NullAudioStreamer::AdvanceStreams, base::Unretained(this)));
-}
-
-void NullAudioStreamer::StopNullStreamer() {
- advance_streams_timer_->Stop();
- advance_streams_timer_ = base::nullopt;
-}
-
-void NullAudioStreamer::AdvanceStreams() {
- base::Time now = base::Time::Now();
- base::TimeDelta time_played = now - last_run_time_;
- last_run_time_ = now;
-
- base::AutoLock auto_lock(streams_lock_);
- for (NullAudioStreamMap::iterator it = streams_.begin(); it != streams_.end();
- ++it) {
- PullFrames(it->first, time_played, &it->second);
- }
-}
-
-void NullAudioStreamer::PullFrames(ShellAudioStream* stream,
- base::TimeDelta time_played,
- NullAudioStream* null_stream) {
- // Calculate how many frames were consumed.
- int sample_rate = stream->GetAudioParameters().sample_rate();
- uint32 frames_played = sample_rate * time_played.InSecondsF();
- frames_played = std::min(frames_played, null_stream->total_available_frames);
- if (!stream->PauseRequested()) {
- stream->ConsumeFrames(frames_played);
- }
- // Pull more frames.
- stream->PullFrames(NULL, &null_stream->total_available_frames);
-}
-
-} // namespace media
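
PullFrames() converts elapsed wall-clock time into a frame count and clamps it to what the stream reported as available before consuming. The conversion on its own, as a sketch:

// Sketch of the elapsed-time to consumed-frames conversion in PullFrames().
#include <algorithm>
#include <stdint.h>

uint32_t FramesElapsed(double seconds_played, int sample_rate,
                       uint32_t frames_available) {
  const uint32_t frames_played =
      static_cast<uint32_t>(sample_rate * seconds_played);
  // Never consume more frames than the stream actually has buffered.
  return std::min(frames_played, frames_available);
}
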
diff --git a/src/media/audio/null_audio_streamer.h b/src/media/audio/null_audio_streamer.h
deleted file mode 100644
index 5342f23..0000000
--- a/src/media/audio/null_audio_streamer.h
+++ /dev/null
@@ -1,81 +0,0 @@
-/*
- * Copyright 2016 Google Inc. All Rights Reserved.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef MEDIA_AUDIO_NULL_AUDIO_STREAMER_H_
-#define MEDIA_AUDIO_NULL_AUDIO_STREAMER_H_
-
-#include <map>
-
-#include "base/memory/singleton.h"
-#include "base/optional.h"
-#include "base/synchronization/lock.h"
-#include "base/threading/thread.h"
-#include "base/time.h"
-#include "base/timer.h"
-#include "media/audio/shell_audio_streamer.h"
-
-namespace media {
-
-// This class implements a ShellAudioStreamer to be used when output to an
-// audio device is not possible or not desired. It starts a thread that will
-// regularly pull and consume frames from any added ShellAudioStreams at the
-// rate implied by each stream's sampling frequency.
-class NullAudioStreamer : public ShellAudioStreamer {
- public:
- static NullAudioStreamer* GetInstance() {
- return Singleton<NullAudioStreamer>::get();
- }
-
- Config GetConfig() const OVERRIDE;
- bool AddStream(ShellAudioStream* stream) OVERRIDE;
- void RemoveStream(ShellAudioStream* stream) OVERRIDE;
- bool HasStream(ShellAudioStream* stream) const OVERRIDE;
-  bool SetVolume(ShellAudioStream* /* stream */, double /* volume */) OVERRIDE {
- return true;
- }
-
- private:
- struct NullAudioStream {
- NullAudioStream() : total_available_frames(0) {}
- uint32 total_available_frames;
- };
-
- NullAudioStreamer();
- ~NullAudioStreamer() OVERRIDE;
-
- void StartNullStreamer();
- void StopNullStreamer();
- void AdvanceStreams();
- void PullFrames(ShellAudioStream* stream,
- base::TimeDelta time_played,
- NullAudioStream* null_audio_stream);
-
- base::Thread null_streamer_thread_;
- typedef base::RepeatingTimer<NullAudioStreamer> RepeatingTimer;
- base::optional<RepeatingTimer> advance_streams_timer_;
- base::Time last_run_time_;
-
- mutable base::Lock streams_lock_;
- typedef std::map<ShellAudioStream*, NullAudioStream> NullAudioStreamMap;
- NullAudioStreamMap streams_;
-
- DISALLOW_COPY_AND_ASSIGN(NullAudioStreamer);
- friend struct DefaultSingletonTraits<NullAudioStreamer>;
-};
-
-} // namespace media
-
-#endif // MEDIA_AUDIO_NULL_AUDIO_STREAMER_H_
diff --git a/src/media/audio/openbsd/audio_manager_openbsd.cc b/src/media/audio/openbsd/audio_manager_openbsd.cc
deleted file mode 100644
index 84304a5..0000000
--- a/src/media/audio/openbsd/audio_manager_openbsd.cc
+++ /dev/null
@@ -1,97 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "media/audio/openbsd/audio_manager_openbsd.h"
-
-#include "base/command_line.h"
-#include "base/stl_util.h"
-#include "media/audio/audio_output_dispatcher.h"
-#if defined(USE_PULSEAUDIO)
-#include "media/audio/pulse/pulse_output.h"
-#endif
-#include "media/base/limits.h"
-#include "media/base/media_switches.h"
-
-#include <fcntl.h>
-
-namespace media {
-
-// Maximum number of output streams that can be open simultaneously.
-static const int kMaxOutputStreams = 50;
-
-// Implementation of AudioManager.
-static bool HasAudioHardware() {
- int fd;
- const char *file;
-
- if ((file = getenv("AUDIOCTLDEVICE")) == 0 || *file == '\0')
- file = "/dev/audioctl";
-
- if ((fd = open(file, O_RDONLY)) < 0)
- return false;
-
- close(fd);
- return true;
-}
-
-bool AudioManagerOpenBSD::HasAudioOutputDevices() {
- return HasAudioHardware();
-}
-
-bool AudioManagerOpenBSD::HasAudioInputDevices() {
- return HasAudioHardware();
-}
-
-AudioManagerOpenBSD::AudioManagerOpenBSD() {
- SetMaxOutputStreamsAllowed(kMaxOutputStreams);
-}
-
-AudioManagerOpenBSD::~AudioManagerOpenBSD() {
- Shutdown();
-}
-
-AudioOutputStream* AudioManagerOpenBSD::MakeLinearOutputStream(
- const AudioParameters& params) {
- DCHECK_EQ(AudioParameters::AUDIO_PCM_LINEAR, params.format);
- return MakeOutputStream(params);
-}
-
-AudioOutputStream* AudioManagerOpenBSD::MakeLowLatencyOutputStream(
- const AudioParameters& params) {
- DCHECK_EQ(AudioParameters::AUDIO_PCM_LOW_LATENCY, params.format);
- return MakeOutputStream(params);
-}
-
-AudioInputStream* AudioManagerOpenBSD::MakeLinearInputStream(
- const AudioParameters& params, const std::string& device_id) {
- DCHECK_EQ(AudioParameters::AUDIO_PCM_LINEAR, params.format);
- NOTIMPLEMENTED();
- return NULL;
-}
-
-AudioInputStream* AudioManagerOpenBSD::MakeLowLatencyInputStream(
- const AudioParameters& params, const std::string& device_id) {
- DCHECK_EQ(AudioParameters::AUDIO_PCM_LOW_LATENCY, params.format);
- NOTIMPLEMENTED();
- return NULL;
-}
-
-AudioOutputStream* AudioManagerOpenBSD::MakeOutputStream(
- const AudioParameters& params) {
-#if defined(USE_PULSEAUDIO)
- if (CommandLine::ForCurrentProcess()->HasSwitch(switches::kUsePulseAudio)) {
- return new PulseAudioOutputStream(params, this);
- }
-#endif
-
- NOTIMPLEMENTED();
- return NULL;
-}
-
-// static
-AudioManager* CreateAudioManager() {
- return new AudioManagerOpenBSD();
-}
-
-} // namespace media
diff --git a/src/media/audio/openbsd/audio_manager_openbsd.h b/src/media/audio/openbsd/audio_manager_openbsd.h
deleted file mode 100644
index aeba43e..0000000
--- a/src/media/audio/openbsd/audio_manager_openbsd.h
+++ /dev/null
@@ -1,45 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef MEDIA_AUDIO_OPENBSD_AUDIO_MANAGER_OPENBSD_H_
-#define MEDIA_AUDIO_OPENBSD_AUDIO_MANAGER_OPENBSD_H_
-
-#include <set>
-
-#include "base/compiler_specific.h"
-#include "media/audio/audio_manager_base.h"
-
-namespace media {
-
-class MEDIA_EXPORT AudioManagerOpenBSD : public AudioManagerBase {
- public:
- AudioManagerOpenBSD();
-
- // Implementation of AudioManager.
- virtual bool HasAudioOutputDevices() OVERRIDE;
- virtual bool HasAudioInputDevices() OVERRIDE;
-
- // Implementation of AudioManagerBase.
- virtual AudioOutputStream* MakeLinearOutputStream(
- const AudioParameters& params) OVERRIDE;
- virtual AudioOutputStream* MakeLowLatencyOutputStream(
- const AudioParameters& params) OVERRIDE;
- virtual AudioInputStream* MakeLinearInputStream(
- const AudioParameters& params, const std::string& device_id) OVERRIDE;
- virtual AudioInputStream* MakeLowLatencyInputStream(
- const AudioParameters& params, const std::string& device_id) OVERRIDE;
-
- protected:
- virtual ~AudioManagerOpenBSD();
-
- private:
- // Called by MakeLinearOutputStream and MakeLowLatencyOutputStream.
- AudioOutputStream* MakeOutputStream(const AudioParameters& params);
-
- DISALLOW_COPY_AND_ASSIGN(AudioManagerOpenBSD);
-};
-
-} // namespace media
-
-#endif // MEDIA_AUDIO_OPENBSD_AUDIO_MANAGER_OPENBSD_H_
diff --git a/src/media/audio/pulse/pulse_output.cc b/src/media/audio/pulse/pulse_output.cc
deleted file mode 100644
index 0687e6e..0000000
--- a/src/media/audio/pulse/pulse_output.cc
+++ /dev/null
@@ -1,452 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "media/audio/pulse/pulse_output.h"
-
-#include <pulse/pulseaudio.h>
-
-#include "base/message_loop.h"
-#include "media/audio/audio_manager_base.h"
-#include "media/audio/audio_parameters.h"
-#include "media/audio/audio_util.h"
-
-namespace media {
-
-// A helper class that acquires pa_threaded_mainloop_lock() while in scope.
-class AutoPulseLock {
- public:
- explicit AutoPulseLock(pa_threaded_mainloop* pa_mainloop)
- : pa_mainloop_(pa_mainloop) {
- pa_threaded_mainloop_lock(pa_mainloop_);
- }
-
- ~AutoPulseLock() {
- pa_threaded_mainloop_unlock(pa_mainloop_);
- }
-
- private:
- pa_threaded_mainloop* pa_mainloop_;
-
- DISALLOW_COPY_AND_ASSIGN(AutoPulseLock);
-};
-
-static pa_sample_format_t BitsToPASampleFormat(int bits_per_sample) {
- switch (bits_per_sample) {
- case 8:
- return PA_SAMPLE_U8;
- case 16:
- return PA_SAMPLE_S16LE;
- case 24:
- return PA_SAMPLE_S24LE;
- case 32:
- return PA_SAMPLE_S32LE;
- default:
- NOTREACHED() << "Invalid bits per sample: " << bits_per_sample;
- return PA_SAMPLE_INVALID;
- }
-}
-
-static pa_channel_position ChromiumToPAChannelPosition(Channels channel) {
- switch (channel) {
- // PulseAudio does not differentiate between left/right and
- // stereo-left/stereo-right, both translate to front-left/front-right.
- case LEFT:
- return PA_CHANNEL_POSITION_FRONT_LEFT;
- case RIGHT:
- return PA_CHANNEL_POSITION_FRONT_RIGHT;
- case CENTER:
- return PA_CHANNEL_POSITION_FRONT_CENTER;
- case LFE:
- return PA_CHANNEL_POSITION_LFE;
- case BACK_LEFT:
- return PA_CHANNEL_POSITION_REAR_LEFT;
- case BACK_RIGHT:
- return PA_CHANNEL_POSITION_REAR_RIGHT;
- case LEFT_OF_CENTER:
- return PA_CHANNEL_POSITION_FRONT_LEFT_OF_CENTER;
- case RIGHT_OF_CENTER:
- return PA_CHANNEL_POSITION_FRONT_RIGHT_OF_CENTER;
- case BACK_CENTER:
- return PA_CHANNEL_POSITION_REAR_CENTER;
- case SIDE_LEFT:
- return PA_CHANNEL_POSITION_SIDE_LEFT;
- case SIDE_RIGHT:
- return PA_CHANNEL_POSITION_SIDE_RIGHT;
- case CHANNELS_MAX:
- return PA_CHANNEL_POSITION_INVALID;
- default:
- NOTREACHED() << "Invalid channel: " << channel;
- return PA_CHANNEL_POSITION_INVALID;
- }
-}
-
-static pa_channel_map ChannelLayoutToPAChannelMap(
- ChannelLayout channel_layout) {
- pa_channel_map channel_map;
- pa_channel_map_init(&channel_map);
-
- channel_map.channels = ChannelLayoutToChannelCount(channel_layout);
- for (Channels ch = LEFT; ch < CHANNELS_MAX;
- ch = static_cast<Channels>(ch + 1)) {
- int channel_index = ChannelOrder(channel_layout, ch);
- if (channel_index < 0)
- continue;
-
- channel_map.map[channel_index] = ChromiumToPAChannelPosition(ch);
- }
-
- return channel_map;
-}
-
-// static, pa_context_notify_cb
-void PulseAudioOutputStream::ContextNotifyCallback(pa_context* c,
- void* p_this) {
- PulseAudioOutputStream* stream = static_cast<PulseAudioOutputStream*>(p_this);
-
- // Forward unexpected failures to the AudioSourceCallback if available. All
- // these variables are only modified under pa_threaded_mainloop_lock() so this
- // should be thread safe.
- if (c && stream->source_callback_ &&
- pa_context_get_state(c) == PA_CONTEXT_FAILED) {
- stream->source_callback_->OnError(stream, pa_context_errno(c));
- }
-
- pa_threaded_mainloop_signal(stream->pa_mainloop_, 0);
-}
-
-// static, pa_stream_notify_cb
-void PulseAudioOutputStream::StreamNotifyCallback(pa_stream* s, void* p_this) {
- PulseAudioOutputStream* stream = static_cast<PulseAudioOutputStream*>(p_this);
-
- // Forward unexpected failures to the AudioSourceCallback if available. All
- // these variables are only modified under pa_threaded_mainloop_lock() so this
- // should be thread safe.
- if (s && stream->source_callback_ &&
- pa_stream_get_state(s) == PA_STREAM_FAILED) {
- stream->source_callback_->OnError(
- stream, pa_context_errno(stream->pa_context_));
- }
-
- pa_threaded_mainloop_signal(stream->pa_mainloop_, 0);
-}
-
-// static, pa_stream_success_cb_t
-void PulseAudioOutputStream::StreamSuccessCallback(pa_stream* s, int success,
- void* p_this) {
- PulseAudioOutputStream* stream = static_cast<PulseAudioOutputStream*>(p_this);
- pa_threaded_mainloop_signal(stream->pa_mainloop_, 0);
-}
-
-// static, pa_stream_request_cb_t
-void PulseAudioOutputStream::StreamRequestCallback(pa_stream* s, size_t len,
- void* p_this) {
- // Fulfill write request; must always result in a pa_stream_write() call.
- static_cast<PulseAudioOutputStream*>(p_this)->FulfillWriteRequest(len);
-}
-
-PulseAudioOutputStream::PulseAudioOutputStream(const AudioParameters& params,
- AudioManagerBase* manager)
- : params_(params),
- manager_(manager),
- pa_context_(NULL),
- pa_mainloop_(NULL),
- pa_stream_(NULL),
- volume_(1.0f),
- source_callback_(NULL) {
- DCHECK(manager_->GetMessageLoop()->BelongsToCurrentThread());
-
- CHECK(params_.IsValid());
- audio_bus_ = AudioBus::Create(params_);
-}
-
-PulseAudioOutputStream::~PulseAudioOutputStream() {
- // All internal structures should already have been freed in Close(), which
- // calls AudioManagerBase::ReleaseOutputStream() which deletes this object.
- DCHECK(!pa_stream_);
- DCHECK(!pa_context_);
- DCHECK(!pa_mainloop_);
-}
-
-// Helper macro for Open() to avoid code spam and string bloat.
-#define RETURN_ON_FAILURE(expression, message) do { \
- if (!(expression)) { \
- if (pa_context_) { \
- DLOG(ERROR) << message << " Error: " \
- << pa_strerror(pa_context_errno(pa_context_)); \
- } else { \
- DLOG(ERROR) << message; \
- } \
- return false; \
- } \
-} while(0)
-
-bool PulseAudioOutputStream::Open() {
- DCHECK(manager_->GetMessageLoop()->BelongsToCurrentThread());
-
- pa_mainloop_ = pa_threaded_mainloop_new();
- RETURN_ON_FAILURE(pa_mainloop_, "Failed to create PulseAudio main loop.");
-
- pa_mainloop_api* pa_mainloop_api = pa_threaded_mainloop_get_api(pa_mainloop_);
- pa_context_ = pa_context_new(pa_mainloop_api, "Chromium");
- RETURN_ON_FAILURE(pa_context_, "Failed to create PulseAudio context.");
-
- // A state callback must be set before calling pa_threaded_mainloop_lock(), or
- // pa_threaded_mainloop_wait() calls may lead to deadlock.
- pa_context_set_state_callback(pa_context_, &ContextNotifyCallback, this);
-
- // Lock the main loop while setting up the context. Failure to do so may lead
- // to crashes as the PulseAudio thread tries to run before things are ready.
- AutoPulseLock auto_lock(pa_mainloop_);
-
- RETURN_ON_FAILURE(
- pa_threaded_mainloop_start(pa_mainloop_) == 0,
- "Failed to start PulseAudio main loop.");
- RETURN_ON_FAILURE(
- pa_context_connect(pa_context_, NULL, PA_CONTEXT_NOAUTOSPAWN, NULL) == 0,
- "Failed to connect PulseAudio context.");
-
- // Wait until |pa_context_| is ready. pa_threaded_mainloop_wait() must be
- // called after pa_context_get_state() in case the context is already ready,
- // otherwise pa_threaded_mainloop_wait() will hang indefinitely.
- while (true) {
- pa_context_state_t context_state = pa_context_get_state(pa_context_);
- RETURN_ON_FAILURE(
- PA_CONTEXT_IS_GOOD(context_state), "Invalid PulseAudio context state.");
- if (context_state == PA_CONTEXT_READY)
- break;
- pa_threaded_mainloop_wait(pa_mainloop_);
- }
-
- // Set sample specifications.
- pa_sample_spec pa_sample_specifications;
- pa_sample_specifications.format = BitsToPASampleFormat(
- params_.bits_per_sample());
- pa_sample_specifications.rate = params_.sample_rate();
- pa_sample_specifications.channels = params_.channels();
-
- // Get channel mapping and open playback stream.
- pa_channel_map* map = NULL;
- pa_channel_map source_channel_map = ChannelLayoutToPAChannelMap(
- params_.channel_layout());
- if (source_channel_map.channels != 0) {
- // The source data uses a supported channel map so we will use it rather
- // than the default channel map (NULL).
- map = &source_channel_map;
- }
- pa_stream_ = pa_stream_new(
- pa_context_, "Playback", &pa_sample_specifications, map);
- RETURN_ON_FAILURE(pa_stream_, "Failed to create PulseAudio stream.");
- pa_stream_set_state_callback(pa_stream_, &StreamNotifyCallback, this);
-
- // Even though we start the stream corked below, PulseAudio will issue one
- // stream request after setup. FulfillWriteRequest() must fulfill the write.
- pa_stream_set_write_callback(pa_stream_, &StreamRequestCallback, this);
-
- // Tell PulseAudio we only want callbacks of a certain size.
- pa_buffer_attr pa_buffer_attributes;
- pa_buffer_attributes.maxlength = params_.GetBytesPerBuffer();
- pa_buffer_attributes.minreq = params_.GetBytesPerBuffer();
- pa_buffer_attributes.prebuf = params_.GetBytesPerBuffer();
- pa_buffer_attributes.tlength = params_.GetBytesPerBuffer();
- pa_buffer_attributes.fragsize = static_cast<uint32_t>(-1);
-
- // Connect playback stream.
- // TODO(dalecurtis): Pulse tends to want really large buffer sizes if we are
- // not using the native sample rate. We should always open the stream with
- // PA_STREAM_FIX_RATE and ensure this is true.
- RETURN_ON_FAILURE(
- pa_stream_connect_playback(
- pa_stream_, NULL, &pa_buffer_attributes,
- static_cast<pa_stream_flags_t>(
- PA_STREAM_ADJUST_LATENCY | PA_STREAM_AUTO_TIMING_UPDATE |
- PA_STREAM_NOT_MONOTONIC | PA_STREAM_START_CORKED),
- NULL, NULL) == 0,
- "Failed to connect PulseAudio stream.");
-
- // Wait for the stream to be ready.
- while (true) {
- pa_stream_state_t stream_state = pa_stream_get_state(pa_stream_);
- RETURN_ON_FAILURE(
- PA_STREAM_IS_GOOD(stream_state), "Invalid PulseAudio stream state.");
- if (stream_state == PA_STREAM_READY)
- break;
- pa_threaded_mainloop_wait(pa_mainloop_);
- }
-
- return true;
-}
-
-#undef RETURN_ON_FAILURE
-
-void PulseAudioOutputStream::Reset() {
- if (!pa_mainloop_) {
- DCHECK(!pa_stream_);
- DCHECK(!pa_context_);
- return;
- }
-
- {
- AutoPulseLock auto_lock(pa_mainloop_);
-
- // Close the stream.
- if (pa_stream_) {
- // Ensure all samples are played out before shutdown.
- WaitForPulseOperation(pa_stream_flush(
- pa_stream_, &StreamSuccessCallback, this));
-
- // Release PulseAudio structures.
- pa_stream_disconnect(pa_stream_);
- pa_stream_set_write_callback(pa_stream_, NULL, NULL);
- pa_stream_set_state_callback(pa_stream_, NULL, NULL);
- pa_stream_unref(pa_stream_);
- pa_stream_ = NULL;
- }
-
- if (pa_context_) {
- pa_context_disconnect(pa_context_);
- pa_context_set_state_callback(pa_context_, NULL, NULL);
- pa_context_unref(pa_context_);
- pa_context_ = NULL;
- }
- }
-
- pa_threaded_mainloop_stop(pa_mainloop_);
- pa_threaded_mainloop_free(pa_mainloop_);
- pa_mainloop_ = NULL;
-}
-
-void PulseAudioOutputStream::Close() {
- DCHECK(manager_->GetMessageLoop()->BelongsToCurrentThread());
-
- Reset();
-
- // Signal to the manager that we're closed and can be removed.
- // This should be the last call in the function as it deletes "this".
- manager_->ReleaseOutputStream(this);
-}
-
-int PulseAudioOutputStream::GetHardwareLatencyInBytes() {
- int negative = 0;
- pa_usec_t pa_latency_micros = 0;
- if (pa_stream_get_latency(pa_stream_, &pa_latency_micros, &negative) != 0)
- return 0;
-
- if (negative)
- return 0;
-
- return (pa_latency_micros * params_.sample_rate() *
- params_.GetBytesPerFrame()) / base::Time::kMicrosecondsPerSecond;
-}
-
-void PulseAudioOutputStream::FulfillWriteRequest(size_t requested_bytes) {
- CHECK_EQ(requested_bytes, static_cast<size_t>(params_.GetBytesPerBuffer()));
-
- int frames_filled = 0;
- if (source_callback_) {
- frames_filled = source_callback_->OnMoreData(
- audio_bus_.get(), AudioBuffersState(0, GetHardwareLatencyInBytes()));
- }
-
- // Zero any unfilled data so it plays back as silence.
- if (frames_filled < audio_bus_->frames()) {
- audio_bus_->ZeroFramesPartial(
- frames_filled, audio_bus_->frames() - frames_filled);
- }
-
- // PulseAudio won't always be able to provide a buffer large enough, so we may
- // need to request multiple buffers and fill them individually.
- int current_frame = 0;
- size_t bytes_remaining = requested_bytes;
- while (bytes_remaining > 0) {
- void* buffer = NULL;
- size_t bytes_to_fill = bytes_remaining;
- CHECK_GE(pa_stream_begin_write(pa_stream_, &buffer, &bytes_to_fill), 0);
-
- // In case PulseAudio gives us a bigger buffer than we want, cap our size.
- bytes_to_fill = std::min(
- std::min(bytes_remaining, bytes_to_fill),
- static_cast<size_t>(params_.GetBytesPerBuffer()));
-
- int frames_to_fill = bytes_to_fill / params_.GetBytesPerFrame();
-
- // Note: If this ever changes to output raw float the data must be clipped
- // and sanitized since it may come from an untrusted source such as NaCl.
- audio_bus_->ToInterleavedPartial(
- current_frame, frames_to_fill, params_.bits_per_sample() / 8, buffer);
- media::AdjustVolume(buffer, bytes_to_fill, params_.channels(),
- params_.bits_per_sample() / 8, volume_);
-
- if (pa_stream_write(pa_stream_, buffer, bytes_to_fill, NULL, 0LL,
- PA_SEEK_RELATIVE) < 0) {
- if (source_callback_) {
- source_callback_->OnError(this, pa_context_errno(pa_context_));
- }
- }
-
- bytes_remaining -= bytes_to_fill;
- current_frame += frames_to_fill;
- }
-}
-
-void PulseAudioOutputStream::Start(AudioSourceCallback* callback) {
- DCHECK(manager_->GetMessageLoop()->BelongsToCurrentThread());
- CHECK(callback);
- CHECK(pa_stream_);
-
- AutoPulseLock auto_lock(pa_mainloop_);
-
- // Ensure the context and stream are ready.
- if (pa_context_get_state(pa_context_) != PA_CONTEXT_READY &&
- pa_stream_get_state(pa_stream_) != PA_STREAM_READY) {
- callback->OnError(this, pa_context_errno(pa_context_));
- return;
- }
-
- source_callback_ = callback;
-
- // Uncork (resume) the stream.
- WaitForPulseOperation(pa_stream_cork(
- pa_stream_, 0, &StreamSuccessCallback, this));
-}
-
-void PulseAudioOutputStream::Stop() {
- DCHECK(manager_->GetMessageLoop()->BelongsToCurrentThread());
-
- // Cork (pause) the stream. Waiting for the main loop lock will ensure
- // outstanding callbacks have completed.
- AutoPulseLock auto_lock(pa_mainloop_);
-
- // Flush the stream prior to corking; doing so afterwards will cause hangs.
- // Write callbacks are suspended while inside pa_threaded_mainloop_lock(), so
- // this is all thread safe.
- WaitForPulseOperation(pa_stream_flush(
- pa_stream_, &StreamSuccessCallback, this));
-
- WaitForPulseOperation(pa_stream_cork(
- pa_stream_, 1, &StreamSuccessCallback, this));
-
- source_callback_ = NULL;
-}
-
-void PulseAudioOutputStream::SetVolume(double volume) {
- DCHECK(manager_->GetMessageLoop()->BelongsToCurrentThread());
-
- volume_ = static_cast<float>(volume);
-}
-
-void PulseAudioOutputStream::GetVolume(double* volume) {
- DCHECK(manager_->GetMessageLoop()->BelongsToCurrentThread());
-
- *volume = volume_;
-}
-
-void PulseAudioOutputStream::WaitForPulseOperation(pa_operation* op) {
- CHECK(op);
- while (pa_operation_get_state(op) == PA_OPERATION_RUNNING) {
- pa_threaded_mainloop_wait(pa_mainloop_);
- }
- pa_operation_unref(op);
-}
-
-} // namespace media
diff --git a/src/media/audio/pulse/pulse_output.h b/src/media/audio/pulse/pulse_output.h
deleted file mode 100644
index cdd7cfd..0000000
--- a/src/media/audio/pulse/pulse_output.h
+++ /dev/null
@@ -1,102 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-//
-// Creates an audio output stream based on the PulseAudio asynchronous API;
-// specifically using the pa_threaded_mainloop model.
-//
-// If the stream is successfully opened, Close() must be called before the
-// stream is deleted as Close() is responsible for ensuring resource cleanup
-// occurs.
-//
-// This object is designed so that all AudioOutputStream methods will be called
-// on the same thread that created the object.
-//
-// WARNING: This object blocks on internal PulseAudio calls in Open() while
-// waiting for PulseAudio's context structure to be ready. It also blocks
-// inside PulseAudio in Start() and repeatedly during playback, waiting for
-// PulseAudio write callbacks to occur.
-
-#ifndef MEDIA_AUDIO_PULSE_PULSE_OUTPUT_H_
-#define MEDIA_AUDIO_PULSE_PULSE_OUTPUT_H_
-
-#include "base/memory/scoped_ptr.h"
-#include "media/audio/audio_io.h"
-#include "media/audio/audio_parameters.h"
-
-struct pa_context;
-struct pa_operation;
-struct pa_stream;
-struct pa_threaded_mainloop;
-
-namespace media {
-class AudioManagerBase;
-
-class PulseAudioOutputStream : public AudioOutputStream {
- public:
- PulseAudioOutputStream(const AudioParameters& params,
- AudioManagerBase* manager);
-
- virtual ~PulseAudioOutputStream();
-
- // Implementation of AudioOutputStream.
- virtual bool Open() OVERRIDE;
- virtual void Close() OVERRIDE;
- virtual void Start(AudioSourceCallback* callback) OVERRIDE;
- virtual void Stop() OVERRIDE;
- virtual void SetVolume(double volume) OVERRIDE;
- virtual void GetVolume(double* volume) OVERRIDE;
-
- private:
- // Called by PulseAudio when |pa_context_| and |pa_stream_| change state. If
- // an unexpected failure state change happens and |source_callback_| is set,
- // these methods will forward the error via OnError().
- static void ContextNotifyCallback(pa_context* c, void* p_this);
- static void StreamNotifyCallback(pa_stream* s, void* p_this);
-
- // Triggers pa_threaded_mainloop_signal() to avoid deadlocks.
- static void StreamSuccessCallback(pa_stream* s, int success, void* p_this);
-
- // Called by PulseAudio when it needs more audio data.
- static void StreamRequestCallback(pa_stream* s, size_t len, void* p_this);
-
- // Fulfill a write request from the write request callback. Outputs silence
- // if the request could not be fulfilled.
- void FulfillWriteRequest(size_t requested_bytes);
-
- // Close() helper function to free internal structs.
- void Reset();
-
- // Returns the current hardware latency value in bytes.
- int GetHardwareLatencyInBytes();
-
- // Helper method for waiting on PulseAudio operations to complete.
- void WaitForPulseOperation(pa_operation* op);
-
- // AudioParameters from the constructor.
- const AudioParameters params_;
-
- // Audio manager that created us. Used to report that we've closed.
- AudioManagerBase* manager_;
-
- // PulseAudio API structs.
- pa_context* pa_context_;
- pa_threaded_mainloop* pa_mainloop_;
- pa_stream* pa_stream_;
-
- // Float representation of volume from 0.0 to 1.0.
- float volume_;
-
- // Callback to audio data source. Must only be modified while holding a lock
- // on |pa_mainloop_| via pa_threaded_mainloop_lock().
- AudioSourceCallback* source_callback_;
-
- // Container for retrieving data from AudioSourceCallback::OnMoreData().
- scoped_ptr<AudioBus> audio_bus_;
-
- DISALLOW_COPY_AND_ASSIGN(PulseAudioOutputStream);
-};
-
-} // namespace media
-
-#endif // MEDIA_AUDIO_PULSE_PULSE_OUTPUT_H_
diff --git a/src/media/audio/sample_rates.cc b/src/media/audio/sample_rates.cc
deleted file mode 100644
index a082a93..0000000
--- a/src/media/audio/sample_rates.cc
+++ /dev/null
@@ -1,26 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "media/audio/sample_rates.h"
-
-namespace media {
-
-AudioSampleRate AsAudioSampleRate(int sample_rate) {
- switch (sample_rate) {
- case 8000: return k8000Hz;
- case 16000: return k16000Hz;
- case 32000: return k32000Hz;
- case 48000: return k48000Hz;
- case 96000: return k96000Hz;
- case 11025: return k11025Hz;
- case 22050: return k22050Hz;
- case 44100: return k44100Hz;
- case 88200: return k88200Hz;
- case 176400: return k176400Hz;
- case 192000: return k192000Hz;
- }
- return kUnexpectedAudioSampleRate;
-}
-
-} // namespace media
diff --git a/src/media/audio/sample_rates.h b/src/media/audio/sample_rates.h
deleted file mode 100644
index 7c29e54..0000000
--- a/src/media/audio/sample_rates.h
+++ /dev/null
@@ -1,35 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef MEDIA_AUDIO_SAMPLE_RATES_H_
-#define MEDIA_AUDIO_SAMPLE_RATES_H_
-
-#include "media/base/media_export.h"
-
-namespace media {
-
-// Enumeration used for histogramming sample rates into distinct buckets.
-// Logged to UMA, so never reuse a value, always add new/greater ones!
-enum AudioSampleRate {
- k8000Hz = 0,
- k16000Hz = 1,
- k32000Hz = 2,
- k48000Hz = 3,
- k96000Hz = 4,
- k11025Hz = 5,
- k22050Hz = 6,
- k44100Hz = 7,
- k88200Hz = 8,
- k176400Hz = 9,
- k192000Hz = 10,
- kUnexpectedAudioSampleRate // Must always be last!
-};
-
-// Helper method to convert integral values to their respective enum values,
-// or kUnexpectedAudioSampleRate if no match exists.
-MEDIA_EXPORT AudioSampleRate AsAudioSampleRate(int sample_rate);
-
-} // namespace media
-
-#endif // MEDIA_AUDIO_SAMPLE_RATES_H_
diff --git a/src/media/audio/scoped_loop_observer.cc b/src/media/audio/scoped_loop_observer.cc
deleted file mode 100644
index 1332b07..0000000
--- a/src/media/audio/scoped_loop_observer.cc
+++ /dev/null
@@ -1,47 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "media/audio/scoped_loop_observer.h"
-
-#include "base/bind.h"
-#include "base/synchronization/waitable_event.h"
-
-namespace media {
-
-ScopedLoopObserver::ScopedLoopObserver(
- const scoped_refptr<base::MessageLoopProxy>& loop)
- : loop_(loop) {
- ObserveLoopDestruction(true, NULL);
-}
-
-ScopedLoopObserver::~ScopedLoopObserver() {
- ObserveLoopDestruction(false, NULL);
-}
-
-void ScopedLoopObserver::ObserveLoopDestruction(bool enable,
- base::WaitableEvent* done) {
- // Note: |done| may be NULL.
- if (loop_->BelongsToCurrentThread()) {
- MessageLoop* loop = MessageLoop::current();
- if (enable) {
- loop->AddDestructionObserver(this);
- } else {
- loop->RemoveDestructionObserver(this);
- }
- } else {
- base::WaitableEvent event(false, false);
- if (loop_->PostTask(FROM_HERE,
- base::Bind(&ScopedLoopObserver::ObserveLoopDestruction,
- base::Unretained(this), enable, &event))) {
- event.Wait();
- } else {
- // The message loop's thread has already terminated, so no need to wait.
- }
- }
-
- if (done)
- done->Signal();
-}
-
-} // namespace media.
diff --git a/src/media/audio/scoped_loop_observer.h b/src/media/audio/scoped_loop_observer.h
deleted file mode 100644
index 659c68b..0000000
--- a/src/media/audio/scoped_loop_observer.h
+++ /dev/null
@@ -1,50 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef MEDIA_AUDIO_SCOPED_LOOP_OBSERVER_H_
-#define MEDIA_AUDIO_SCOPED_LOOP_OBSERVER_H_
-
-#include "base/memory/ref_counted.h"
-#include "base/message_loop.h"
-#include "base/message_loop_proxy.h"
-
-namespace base {
-class WaitableEvent;
-}
-
-namespace media {
-
-// A common base class for AudioOutputDevice and AudioInputDevice that manages
-// a message loop pointer and monitors it for destruction. If the object goes
-// out of scope before the message loop, the object will automatically remove
-// itself from the message loop's list of destruction observers.
-// NOTE: The class that inherits from this class must implement the
-// WillDestroyCurrentMessageLoop virtual method from DestructionObserver.
-class ScopedLoopObserver
- : public MessageLoop::DestructionObserver {
- public:
- explicit ScopedLoopObserver(
- const scoped_refptr<base::MessageLoopProxy>& message_loop);
-
- protected:
- virtual ~ScopedLoopObserver();
-
- // Accessor to the loop that's used by the derived class.
- const scoped_refptr<base::MessageLoopProxy>& message_loop() { return loop_; }
-
- private:
- // Call to add or remove ourselves from the list of destruction observers for
- // the message loop.
- void ObserveLoopDestruction(bool enable, base::WaitableEvent* done);
-
- // A pointer to the message loop's proxy. In case the loop gets destroyed
- // before this object goes out of scope, PostTask etc will fail but not crash.
- scoped_refptr<base::MessageLoopProxy> loop_;
-
- DISALLOW_COPY_AND_ASSIGN(ScopedLoopObserver);
-};
-
-} // namespace media.
-
-#endif // MEDIA_AUDIO_SCOPED_LOOP_OBSERVER_H_
diff --git a/src/media/audio/shared_memory_util.cc b/src/media/audio/shared_memory_util.cc
deleted file mode 100644
index b65df03..0000000
--- a/src/media/audio/shared_memory_util.cc
+++ /dev/null
@@ -1,70 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "media/audio/shared_memory_util.h"
-
-#include "base/atomicops.h"
-#include "base/logging.h"
-
-using base::subtle::Atomic32;
-
-static const uint32 kUnknownDataSize = static_cast<uint32>(-1);
-
-namespace media {
-
-uint32 TotalSharedMemorySizeInBytes(uint32 packet_size) {
- // Need to reserve extra 4 bytes for size of data.
- return packet_size + sizeof(Atomic32);
-}
-
-uint32 PacketSizeInBytes(uint32 shared_memory_created_size) {
- return shared_memory_created_size - sizeof(Atomic32);
-}
-
-uint32 GetActualDataSizeInBytes(base::SharedMemory* shared_memory,
- uint32 packet_size) {
- char* ptr = static_cast<char*>(shared_memory->memory()) + packet_size;
- DCHECK_EQ(0u, reinterpret_cast<size_t>(ptr) & 3);
-
- // Actual data size stored at the end of the buffer.
- uint32 actual_data_size =
- base::subtle::Acquire_Load(reinterpret_cast<volatile Atomic32*>(ptr));
- return std::min(actual_data_size, packet_size);
-}
-
-void SetActualDataSizeInBytes(void* shared_memory_ptr,
- uint32 packet_size,
- uint32 actual_data_size) {
- char* ptr = static_cast<char*>(shared_memory_ptr) + packet_size;
- DCHECK_EQ(0u, reinterpret_cast<size_t>(ptr) & 3);
-
- // Set actual data size at the end of the buffer.
- base::subtle::Release_Store(reinterpret_cast<volatile Atomic32*>(ptr),
- actual_data_size);
-}
-
-void SetActualDataSizeInBytes(base::SharedMemory* shared_memory,
- uint32 packet_size,
- uint32 actual_data_size) {
- SetActualDataSizeInBytes(shared_memory->memory(),
- packet_size, actual_data_size);
-}
-
-void SetUnknownDataSize(base::SharedMemory* shared_memory,
- uint32 packet_size) {
- SetActualDataSizeInBytes(shared_memory, packet_size, kUnknownDataSize);
-}
-
-bool IsUnknownDataSize(base::SharedMemory* shared_memory,
- uint32 packet_size) {
- char* ptr = static_cast<char*>(shared_memory->memory()) + packet_size;
- DCHECK_EQ(0u, reinterpret_cast<size_t>(ptr) & 3);
-
- // Actual data size stored at the end of the buffer.
- uint32 actual_data_size =
- base::subtle::Acquire_Load(reinterpret_cast<volatile Atomic32*>(ptr));
- return actual_data_size == kUnknownDataSize;
-}
-
-} // namespace media
diff --git a/src/media/audio/shared_memory_util.h b/src/media/audio/shared_memory_util.h
deleted file mode 100644
index 2ae6ffe..0000000
--- a/src/media/audio/shared_memory_util.h
+++ /dev/null
@@ -1,39 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef MEDIA_AUDIO_SHARED_MEMORY_UTIL_H_
-#define MEDIA_AUDIO_SHARED_MEMORY_UTIL_H_
-
-#include "base/basictypes.h"
-#include "base/shared_memory.h"
-#include "media/base/media_export.h"
-
-namespace media {
-
-// Value sent by the controller to the renderer in low-latency mode
-// indicating that the stream is paused.
-enum { kPauseMark = -1 };
-
-// Functions that handle the data buffer passed between processes in shared
-// memory. Called on both IPC sides. These are necessary because the shared
-// memory layout stores the data size in bytes in the last word of the block.
-
-MEDIA_EXPORT uint32 TotalSharedMemorySizeInBytes(uint32 packet_size);
-MEDIA_EXPORT uint32 PacketSizeInBytes(uint32 shared_memory_created_size);
-MEDIA_EXPORT uint32 GetActualDataSizeInBytes(base::SharedMemory* shared_memory,
- uint32 packet_size);
-MEDIA_EXPORT void SetActualDataSizeInBytes(base::SharedMemory* shared_memory,
- uint32 packet_size,
- uint32 actual_data_size);
-MEDIA_EXPORT void SetActualDataSizeInBytes(void* shared_memory_ptr,
- uint32 packet_size,
- uint32 actual_data_size);
-MEDIA_EXPORT void SetUnknownDataSize(base::SharedMemory* shared_memory,
- uint32 packet_size);
-MEDIA_EXPORT bool IsUnknownDataSize(base::SharedMemory* shared_memory,
- uint32 packet_size);
-
-} // namespace media
-
-#endif // MEDIA_AUDIO_SHARED_MEMORY_UTIL_H_
diff --git a/src/media/audio/shell_audio_sink.cc b/src/media/audio/shell_audio_sink.cc
deleted file mode 100644
index e46d3f7..0000000
--- a/src/media/audio/shell_audio_sink.cc
+++ /dev/null
@@ -1,350 +0,0 @@
-/*
- * Copyright 2013 Google Inc. All Rights Reserved.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "media/audio/shell_audio_sink.h"
-
-#include <limits>
-
-#include "base/bind.h"
-#include "base/debug/trace_event.h"
-#include "base/logging.h"
-#include "base/memory/aligned_memory.h"
-#include "base/memory/scoped_ptr.h"
-#include "media/base/audio_bus.h"
-#include "media/base/shell_media_statistics.h"
-#include "media/filters/shell_audio_renderer.h"
-#include "media/mp4/aac.h"
-
-#if defined(OS_STARBOARD)
-#include "starboard/configuration.h"
-#endif // defined(OS_STARBOARD)
-
-namespace {
-
-scoped_ptr_malloc<float, base::ScopedPtrAlignedFree> s_audio_sink_buffer;
-size_t s_audio_sink_buffer_size_in_float;
-
-} // namespace
-
-namespace media {
-
-void AudioSinkSettings::Reset(const ShellAudioStreamer::Config& config,
- const AudioParameters& audio_parameters) {
- config_ = config;
- audio_parameters_ = audio_parameters;
-}
-
-const ShellAudioStreamer::Config& AudioSinkSettings::config() const {
- return config_;
-}
-
-const AudioParameters& AudioSinkSettings::audio_parameters() const {
- return audio_parameters_;
-}
-
-int AudioSinkSettings::channels() const {
- return audio_parameters_.channels();
-}
-
-int AudioSinkSettings::per_channel_frames(AudioBus* audio_bus) const {
- return audio_bus->frames() * sizeof(float) /
- (config_.interleaved() ? channels() : 1) /
- (audio_parameters_.bits_per_sample() / 8);
-}
-
-// static
-ShellAudioSink* ShellAudioSink::Create(ShellAudioStreamer* audio_streamer) {
- return new ShellAudioSink(audio_streamer);
-}
-
-ShellAudioSink::ShellAudioSink(ShellAudioStreamer* audio_streamer)
- : render_callback_(NULL),
- pause_requested_(true),
- rebuffering_(true),
- rebuffer_num_frames_(0),
- render_frame_cursor_(0),
- output_frame_cursor_(0),
- audio_streamer_(audio_streamer) {
- buffer_factory_ = ShellBufferFactory::Instance();
-}
-
-ShellAudioSink::~ShellAudioSink() {
- if (render_callback_) {
- DCHECK(!audio_streamer_->HasStream(this));
- }
-}
-
-void ShellAudioSink::Initialize(const AudioParameters& params,
- RenderCallback* callback) {
- TRACE_EVENT0("media_stack", "ShellAudioSink::Initialize()");
- DCHECK(!render_callback_);
- DCHECK(params.bits_per_sample() == 16 || params.bits_per_sample() == 32);
-
- render_callback_ = callback;
- audio_parameters_ = params;
-
- streamer_config_ = audio_streamer_->GetConfig();
- settings_.Reset(streamer_config_, params);
-
- // Creating the audio bus
- size_t per_channel_size_in_float =
- streamer_config_.sink_buffer_size_in_frames_per_channel() *
- audio_parameters_.bits_per_sample() / (8 * sizeof(float));
- size_t audio_bus_buffer_size_in_float =
- settings_.channels() * per_channel_size_in_float;
- if (audio_bus_buffer_size_in_float > s_audio_sink_buffer_size_in_float) {
- s_audio_sink_buffer_size_in_float = audio_bus_buffer_size_in_float;
- // Free the existing memory first so we have more free memory for the
- // following allocation.
- s_audio_sink_buffer.reset(NULL);
- s_audio_sink_buffer.reset(static_cast<float*>(
- base::AlignedAlloc(s_audio_sink_buffer_size_in_float * sizeof(float),
- AudioBus::kChannelAlignment)));
- if (!s_audio_sink_buffer) {
- DLOG(ERROR) << "couldn't reallocate sink buffer";
- render_callback_->OnRenderError();
- return;
- }
- }
-
- if (streamer_config_.interleaved()) {
- audio_bus_ = AudioBus::WrapMemory(
- 1, settings_.channels() * per_channel_size_in_float,
- s_audio_sink_buffer.get());
- } else {
- audio_bus_ =
- AudioBus::WrapMemory(settings_.channels(), per_channel_size_in_float,
- s_audio_sink_buffer.get());
- }
-
- if (!audio_bus_) {
- NOTREACHED() << "couldn't create sink buffer";
- render_callback_->OnRenderError();
- return;
- }
-
- rebuffer_num_frames_ =
- streamer_config_.initial_rebuffering_frames_per_channel();
- renderer_audio_bus_ = AudioBus::CreateWrapper(audio_bus_->channels());
-}
-
-void ShellAudioSink::Start() {
- TRACE_EVENT0("media_stack", "ShellAudioSink::Start()");
- DCHECK(render_callback_);
-
- if (!audio_streamer_->HasStream(this)) {
- pause_requested_ = true;
- rebuffering_ = true;
- audio_streamer_->StopBackgroundMusic();
- audio_streamer_->AddStream(this);
- DCHECK(audio_streamer_->HasStream(this));
- }
-}
-
-void ShellAudioSink::Stop() {
- TRACE_EVENT0("media_stack", "ShellAudioSink::Stop()");
- // It is possible that Stop() is called before Initialize() is called. In
- // this case the audio_streamer_ will not be able to check if it has the
- // stream as audio_parameters_ hasn't been initialized.
- if (render_callback_ && audio_streamer_->HasStream(this)) {
- audio_streamer_->RemoveStream(this);
- pause_requested_ = true;
- rebuffering_ = true;
- render_frame_cursor_ = 0;
- output_frame_cursor_ = 0;
- }
-}
-
-void ShellAudioSink::Pause(bool flush) {
- TRACE_EVENT0("media_stack", "ShellAudioSink::Pause()");
- // clear consumption of data on the mixer.
- pause_requested_ = true;
- if (flush) {
- TRACE_EVENT0("media_stack", "ShellAudioSink::Pause() flushing.");
- // remove and re-add the stream to flush
- audio_streamer_->RemoveStream(this);
- rebuffering_ = true;
- render_frame_cursor_ = 0;
- output_frame_cursor_ = 0;
- audio_streamer_->AddStream(this);
- }
-}
-
-void ShellAudioSink::Play() {
- TRACE_EVENT0("media_stack", "ShellAudioSink::Play()");
- // clear flag on mixer callback, will start to consume more data
- pause_requested_ = false;
-}
-
-bool ShellAudioSink::SetVolume(double volume) {
- return audio_streamer_->SetVolume(this, volume);
-}
-
-void ShellAudioSink::ResumeAfterUnderflow(bool buffer_more_audio) {
- // Only rebuffer when paused; we access state variables non-atomically.
- DCHECK(pause_requested_);
- DCHECK(rebuffering_);
-
- if (!buffer_more_audio)
- return;
-
- rebuffer_num_frames_ = std::min<int>(
- rebuffer_num_frames_ * 2, settings_.per_channel_frames(audio_bus_.get()));
-}
-
-bool ShellAudioSink::PauseRequested() const {
- return pause_requested_ || rebuffering_;
-}
-
-bool ShellAudioSink::PullFrames(uint32_t* offset_in_frame,
- uint32_t* total_frames) {
- TRACE_EVENT0("media_stack", "ShellAudioSink::PullFrames()");
- // with a valid render callback
- DCHECK(render_callback_);
-
- uint32_t dummy_offset_in_frame, dummy_total_frames;
- if (!offset_in_frame)
- offset_in_frame = &dummy_offset_in_frame;
- if (!total_frames)
- total_frames = &dummy_total_frames;
-
- *total_frames = render_frame_cursor_ - output_frame_cursor_;
- uint32 free_frames =
- settings_.per_channel_frames(audio_bus_.get()) - *total_frames;
- // Number of ms of buffered playback remaining
- uint32_t buffered_time =
- (*total_frames * 1000 / audio_parameters_.sample_rate());
- if (free_frames >= mp4::AAC::kFramesPerAccessUnit) {
- SetupRenderAudioBus();
-
- int frames_rendered =
- render_callback_->Render(renderer_audio_bus_.get(), buffered_time);
- // 0 indicates the read is still pending. Positive number is # of frames
- // rendered, negative number indicates an error.
- if (frames_rendered > 0) {
- // A positive value indicates the number of frames in a successful read.
- // TODO: We cannot guarantee this on platforms that use a resampler. Check
- // if it is possible to move the resampling into the streamer.
- // DCHECK_EQ(frames_rendered, mp4::AAC::kFramesPerAccessUnit);
- render_frame_cursor_ += frames_rendered;
- *total_frames += frames_rendered;
- free_frames -= frames_rendered;
- }
- } else {
- render_callback_->Render(NULL, buffered_time);
- }
-
- bool buffer_full = free_frames < mp4::AAC::kFramesPerAccessUnit;
- DCHECK_LE(*total_frames,
- static_cast<uint32>(std::numeric_limits<int32>::max()));
- bool rebuffer_threshold_reached =
- static_cast<int>(*total_frames) >= rebuffer_num_frames_;
- if (rebuffering_ && (buffer_full || rebuffer_threshold_reached)) {
- render_callback_->SinkFull();
- rebuffering_ = false;
- }
-
-#if defined(OS_STARBOARD)
-#if SB_IS(MEDIA_UNDERFLOW_DETECTED_BY_AUDIO_SINK)
-
-#define MEDIA_UNDERFLOW_DETECTED_BY_AUDIO_SINK 1
-
-#endif // SB_IS(MEDIA_UNDERFLOW_DETECTED_BY_AUDIO_SINK)
-#endif // #if defined(OS_STARBOARD)
-
-#if defined(MEDIA_UNDERFLOW_DETECTED_BY_AUDIO_SINK)
- const size_t kUnderflowThreshold = mp4::AAC::kFramesPerAccessUnit / 2;
- if (*total_frames < kUnderflowThreshold) {
- if (!rebuffering_) {
- rebuffering_ = true;
- render_callback_->SinkUnderflow();
- UPDATE_MEDIA_STATISTICS(STAT_TYPE_AUDIO_UNDERFLOW, 0);
- }
- }
- *offset_in_frame =
- output_frame_cursor_ % settings_.per_channel_frames(audio_bus_.get());
- return !PauseRequested();
-#else // defined(MEDIA_UNDERFLOW_DETECTED_BY_AUDIO_SINK)
- rebuffering_ = true;
- *offset_in_frame =
- output_frame_cursor_ % settings_.per_channel_frames(audio_bus_.get());
- if (pause_requested_) {
- return false;
- }
- return true;
-#endif // defined(MEDIA_UNDERFLOW_DETECTED_BY_AUDIO_SINK)
-}
-
-void ShellAudioSink::ConsumeFrames(uint32_t frame_played) {
- TRACE_EVENT1("media_stack", "ShellAudioSink::ConsumeFrames()", "audio_clock",
- (output_frame_cursor_ * 1000) / audio_parameters_.sample_rate());
- // Called by the Streamer thread to indicate where the hardware renderer
- // is in playback
- if (frame_played > 0) {
- // advance our output cursor by the number of frames we're returning
- // update audio clock, used for jitter calculations
- output_frame_cursor_ += frame_played;
- DCHECK_LE(output_frame_cursor_, render_frame_cursor_);
- }
-}
-
-AudioBus* ShellAudioSink::GetAudioBus() {
- return audio_bus_.get();
-}
-
-const AudioParameters& ShellAudioSink::GetAudioParameters() const {
- return audio_parameters_;
-}
-
-void ShellAudioSink::SetupRenderAudioBus() {
- // check for buffer wraparound, hopefully rare
- int render_frame_position =
- render_frame_cursor_ % settings_.per_channel_frames(audio_bus_.get());
- int requested_frames = mp4::AAC::kFramesPerAccessUnit;
- if (render_frame_position + requested_frames >
- settings_.per_channel_frames(audio_bus_.get())) {
- requested_frames =
- settings_.per_channel_frames(audio_bus_.get()) - render_frame_position;
- }
- // calculate the offset into the buffer where we'd like to store these data
- if (streamer_config_.interleaved()) {
- uint8* channel_data = reinterpret_cast<uint8*>(audio_bus_->channel(0));
- uint8* channel_offset = channel_data +
- render_frame_position *
- audio_parameters_.bits_per_sample() / 8 *
- settings_.channels();
- // setup the AudioBus to pass to the renderer
- renderer_audio_bus_->SetChannelData(
- 0, reinterpret_cast<float*>(channel_offset));
- renderer_audio_bus_->set_frames(requested_frames *
- audio_parameters_.bits_per_sample() / 8 /
- sizeof(float) * settings_.channels());
- } else {
- for (int i = 0; i < audio_bus_->channels(); ++i) {
- uint8* channel_data = reinterpret_cast<uint8*>(audio_bus_->channel(i));
- uint8* channel_offset =
- channel_data +
- render_frame_position * audio_parameters_.bits_per_sample() / 8;
- renderer_audio_bus_->SetChannelData(
- i, reinterpret_cast<float*>(channel_offset));
- }
- renderer_audio_bus_->set_frames(requested_frames *
- audio_parameters_.bits_per_sample() / 8 /
- sizeof(float));
- }
-}
-
-} // namespace media
diff --git a/src/media/audio/shell_audio_sink.h b/src/media/audio/shell_audio_sink.h
deleted file mode 100644
index c2eca08..0000000
--- a/src/media/audio/shell_audio_sink.h
+++ /dev/null
@@ -1,112 +0,0 @@
-/*
- * Copyright 2013 Google Inc. All Rights Reserved.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef MEDIA_AUDIO_SHELL_AUDIO_SINK_H_
-#define MEDIA_AUDIO_SHELL_AUDIO_SINK_H_
-
-#include "base/threading/thread.h"
-#include "media/base/audio_renderer_sink.h"
-#include "media/audio/shell_audio_streamer.h"
-#include "media/base/shell_buffer_factory.h"
-
-namespace media {
-
-// This class is used to manage the complexity of audio settings, as the audio
-// settings are determined by the original audio data (stereo, 5.1, etc.), by
-// the decoder (some decoders decode mono into stereo), and by the hardware
-// (some hardware requires audio data to be interleaved while other hardware
-// requires it to be non-interleaved).
-class AudioSinkSettings {
- public:
- AudioSinkSettings() {}
-
- void Reset(const ShellAudioStreamer::Config& config,
- const AudioParameters& audio_parameters);
- const ShellAudioStreamer::Config& config() const;
- const AudioParameters& audio_parameters() const;
-
- int channels() const;
- int per_channel_frames(AudioBus* audio_bus) const;
-
- private:
- ShellAudioStreamer::Config config_;
- AudioParameters audio_parameters_;
-};
-
-// Platform-specific implementation of an audio endpoint.
-class MEDIA_EXPORT ShellAudioSink : NON_EXPORTED_BASE(public AudioRendererSink),
- NON_EXPORTED_BASE(public ShellAudioStream) {
- public:
- ShellAudioSink(ShellAudioStreamer* audio_streamer);
- virtual ~ShellAudioSink();
-
- // static factory method
- static ShellAudioSink* Create(ShellAudioStreamer* audio_streamer);
-
- // AudioRendererSink implementation
- void Initialize(const AudioParameters& params,
- RenderCallback* callback) OVERRIDE;
- void Start() OVERRIDE;
- void Stop() OVERRIDE;
- void Pause(bool flush) OVERRIDE;
- void Play() OVERRIDE;
- bool SetVolume(double volume) OVERRIDE;
- void ResumeAfterUnderflow(bool buffer_more_audio) OVERRIDE;
-
- // ShellAudioStream implementation
- bool PauseRequested() const OVERRIDE;
- bool PullFrames(uint32_t* offset_in_frame, uint32_t* total_frames) OVERRIDE;
- void ConsumeFrames(uint32_t frame_played) OVERRIDE;
- const AudioParameters& GetAudioParameters() const OVERRIDE;
- AudioBus* GetAudioBus() OVERRIDE;
-
- private:
- // Configure the audio bus that will be sent to the AudioRenderer. It reuses
- // the memory occupied by the sink audio bus (audio_bus_).
- void SetupRenderAudioBus();
-
- AudioParameters audio_parameters_;
- RenderCallback* render_callback_;
-
- scoped_ptr<AudioBus> audio_bus_;
-
- // Used as a parameter when calling render_callback_->Render().
- // We can only construct it through a static Create method that does a heap
- // allocation, so it is a member variable to avoid a heap allocation each
- // frame.
- scoped_ptr<AudioBus> renderer_audio_bus_;
-
- bool pause_requested_;
- bool rebuffering_;
- // Number of frames to rebuffer before calling SinkFull
- int rebuffer_num_frames_;
-
- // Number of frames that have been loaded into audio_bus_ from the Renderer
- // (and may have been played and since been overwritten by newer frames).
- uint64_t render_frame_cursor_;
- // Advanced by ConsumeFrames() as the Streamer reports playback advancing.
- uint64_t output_frame_cursor_;
-
- scoped_refptr<ShellBufferFactory> buffer_factory_;
- ShellAudioStreamer* audio_streamer_;
- ShellAudioStreamer::Config streamer_config_;
-
- AudioSinkSettings settings_;
-};
-
-} // namespace media
-
-#endif // MEDIA_AUDIO_SHELL_AUDIO_SINK_H_
diff --git a/src/media/audio/shell_audio_sink_unittest.cc b/src/media/audio/shell_audio_sink_unittest.cc
deleted file mode 100644
index 60995c1..0000000
--- a/src/media/audio/shell_audio_sink_unittest.cc
+++ /dev/null
@@ -1,606 +0,0 @@
-/*
- * Copyright 2012 Google Inc. All Rights Reserved.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "media/audio/shell_audio_sink.h"
-
-#include <algorithm>
-
-#include "media/audio/mock_shell_audio_streamer.h"
-#include "media/base/mock_filters.h"
-#include "media/mp4/aac.h"
-#include "testing/gmock/include/gmock/gmock.h"
-#include "testing/gtest/include/gtest/gtest.h"
-
-typedef media::ShellAudioStreamer::Config Config;
-
-// TODO: Add 32 bits per sample tests.
-
-namespace {
-
-using namespace testing;
-
-const uint32 kMaxHardwareChannelsStereo = 2;
-const size_t kFramesPerAccessUnit = media::mp4::AAC::kFramesPerAccessUnit;
-
-bool operator==(const media::AudioParameters& params1,
- const media::AudioParameters& params2) {
- return params1.channels() == params2.channels() &&
- params1.bits_per_sample() == params2.bits_per_sample() &&
- params1.sample_rate() == params2.sample_rate();
-}
-
-void InterleavedFill(size_t frames_per_channel,
- int channels,
- int bytes_per_sample,
- uint8* data,
- uint8* fill_byte) {
- while (frames_per_channel) {
- for (int i = 0; i < channels; ++i) {
- for (int j = 0; j < bytes_per_sample; ++j) {
- *data = *fill_byte + j;
- ++data;
- }
- }
- *fill_byte += bytes_per_sample;
- --frames_per_channel;
- }
-}
-
-void InterleavedVerify(size_t frames_per_channel,
- int channels,
- int bytes_per_sample,
- const uint8* data,
- uint8* fill_byte) {
- while (frames_per_channel) {
- for (int i = 0; i < channels; ++i) {
- for (int j = 0; j < bytes_per_sample; ++j) {
- ASSERT_EQ(*data, static_cast<uint8>(*fill_byte + j));
- ++data;
- }
- }
- *fill_byte += bytes_per_sample;
- --frames_per_channel;
- }
-}
-
-class MockRenderCallback : public media::AudioRendererSink::RenderCallback {
- public:
- MockRenderCallback() {}
-
- MOCK_METHOD2(Render, int(media::AudioBus*, int));
- MOCK_METHOD3(RenderIO, void(media::AudioBus*, media::AudioBus*, int));
- MOCK_METHOD0(OnRenderError, void());
- MOCK_METHOD0(SinkFull, void());
-
- MOCK_METHOD0(SinkUnderflow, void());
-
- DISALLOW_COPY_AND_ASSIGN(MockRenderCallback);
-};
-
-class ShellAudioSinkTest : public testing::Test {
- public:
- ShellAudioSinkTest() { media::ShellBufferFactory::Initialize(); }
-
- virtual ~ShellAudioSinkTest() { media::ShellBufferFactory::Terminate(); }
-
- void Configure(const Config& config) {
- render_byte_ = 0;
- consumption_byte_ = 0;
- render_bytes_num_ = 0;
- consumption_bytes_num_ = 0;
- render_frames_per_channel_ = 0;
- consumption_frames_per_channel_ = 0;
-
- sink_ = media::ShellAudioSink::Create(&streamer_);
- EXPECT_CALL(streamer_, GetConfig()).WillRepeatedly(Return(config));
- EXPECT_CALL(streamer_, HasStream(sink_.get()))
- .WillRepeatedly(Return(false));
- EXPECT_CALL(render_callback_, Render(_, _)).WillRepeatedly(Return(0));
- EXPECT_CALL(render_callback_, SinkFull()).Times(AnyNumber());
- EXPECT_CALL(render_callback_, SinkUnderflow()).Times(AnyNumber());
- }
-
- void FillAudioBus(int frames_per_channel, media::AudioBus* audio_bus) {
- Config config = streamer_.GetConfig();
- media::AudioParameters params = sink_->GetAudioParameters();
- int bytes_per_channel = frames_per_channel * params.bits_per_sample() / 8;
- int channels = params.channels();
- if (config.interleaved()) {
- bytes_per_channel *= channels;
- }
- ASSERT_LE(bytes_per_channel, audio_bus->frames() * sizeof(float));
-
- if (config.interleaved()) {
- ASSERT_EQ(audio_bus->channels(), 1);
- InterleavedFill(
- frames_per_channel, channels, params.bits_per_sample() / 8,
- reinterpret_cast<uint8*>(audio_bus->channel(0)), &render_byte_);
- } else {
- ASSERT_EQ(audio_bus->channels(), channels);
- uint8 render_byte;
- for (int i = 0; i < channels; ++i) {
- render_byte = render_byte_;
- InterleavedFill(frames_per_channel, 1, params.bits_per_sample() / 8,
- reinterpret_cast<uint8*>(audio_bus->channel(i)),
- &render_byte);
- }
- render_byte_ = render_byte;
- }
-
- render_bytes_num_ += channels * bytes_per_channel;
- render_frames_per_channel_ += frames_per_channel;
- }
-
- void Consume(int frames_per_channel) {
- Config config = streamer_.GetConfig();
- media::AudioParameters params = sink_->GetAudioParameters();
- media::AudioSinkSettings settings;
- media::AudioBus* audio_bus = sink_->GetAudioBus();
- int bytes_per_channel = frames_per_channel * params.bits_per_sample() / 8;
-
- settings.Reset(config, params);
- if (config.interleaved()) {
- bytes_per_channel *= settings.channels();
- }
- ASSERT_LE(bytes_per_channel, audio_bus->frames() * sizeof(float));
-
- uint32_t offset_in_frame, total_frames;
- EXPECT_CALL(render_callback_, Render(_, _)).WillOnce(Return(0));
- sink_->PullFrames(&offset_in_frame, &total_frames);
-
- EXPECT_LE(frames_per_channel, total_frames);
-
- int frames_to_request =
- std::min<int>(frames_per_channel,
- settings.per_channel_frames(audio_bus) - offset_in_frame);
- if (config.interleaved()) {
- ASSERT_EQ(audio_bus->channels(), 1);
- uint8* data =
- reinterpret_cast<uint8*>(audio_bus->channel(0)) +
- offset_in_frame * params.bits_per_sample() / 8 * settings.channels();
- InterleavedVerify(frames_to_request, settings.channels(),
- params.bits_per_sample() / 8, data, &consumption_byte_);
- } else {
- ASSERT_EQ(audio_bus->channels(), settings.channels());
- uint8 consumption_byte;
- for (int i = 0; i < settings.channels(); ++i) {
- consumption_byte = consumption_byte_;
- uint8* data = reinterpret_cast<uint8*>(audio_bus->channel(i)) +
- offset_in_frame * params.bits_per_sample() / 8;
- InterleavedVerify(frames_to_request, 1, params.bits_per_sample() / 8,
- data, &consumption_byte);
- }
- consumption_byte_ = consumption_byte;
- }
-
- frames_to_request = frames_per_channel - frames_to_request;
- if (frames_to_request != 0) {
- if (config.interleaved()) {
- ASSERT_EQ(audio_bus->channels(), 1);
- uint8* data = reinterpret_cast<uint8*>(audio_bus->channel(0));
- InterleavedVerify(frames_to_request, settings.channels(),
- params.bits_per_sample() / 8, data,
- &consumption_byte_);
- } else {
- ASSERT_EQ(audio_bus->channels(), settings.channels());
- uint8 consumption_byte;
- for (int i = 0; i < settings.channels(); ++i) {
- consumption_byte = consumption_byte_;
- uint8* data = reinterpret_cast<uint8*>(audio_bus->channel(i));
- InterleavedVerify(frames_to_request, 1, params.bits_per_sample() / 8,
- data, &consumption_byte);
- }
- consumption_byte_ = consumption_byte;
- }
- }
-
- sink_->ConsumeFrames(frames_per_channel);
-
- consumption_bytes_num_ += settings.channels() * bytes_per_channel;
- consumption_frames_per_channel_ += frames_per_channel;
- }
-
- bool AllConsumed() {
- uint32_t offset_in_frame, total_frames;
- EXPECT_CALL(render_callback_, Render(_, _))
- .Times(AtLeast(0))
- .WillRepeatedly(Return(0));
- sink_->PullFrames(&offset_in_frame, &total_frames);
-
- return total_frames == 0 && render_byte_ == consumption_byte_ &&
- render_bytes_num_ == consumption_bytes_num_ &&
- render_frames_per_channel_ == consumption_frames_per_channel_;
- }
-
- // ==== Test Fixture Members
- media::MockShellAudioStreamer streamer_;
- MockRenderCallback render_callback_;
- scoped_refptr<media::ShellAudioSink> sink_;
-
- uint8 render_byte_;
- uint8 consumption_byte_;
- int render_bytes_num_;
- int consumption_bytes_num_;
- int render_frames_per_channel_;
- int consumption_frames_per_channel_;
-};
-
-// Verify the frame count of audio_bus
-ACTION_P3(VerifyAudioBusFrameCount, config, init_params, frames_per_channel) {
- media::AudioBus* audio_bus = arg0;
- int bytes_per_channel =
- frames_per_channel * init_params.bits_per_sample() / 8;
- if (config.interleaved()) {
- int channels = init_params.channels();
- bytes_per_channel *= channels;
- }
- EXPECT_EQ(bytes_per_channel, audio_bus->frames() * sizeof(float));
- return 0;
-}
-
-// Fill audio_bus with |frames_per_channel| frames of rendered test data.
-ACTION_P2(RenderAudioBus, frames_per_channel, sink_test) {
- media::AudioBus* audio_bus = arg0;
- sink_test->FillAudioBus(frames_per_channel, audio_bus);
- return frames_per_channel;
-}
-
-TEST_F(ShellAudioSinkTest, Prerequisites) {
- uint8 data[48000] = {0};
- uint8 render_byte = 0, consume_byte = 0;
- uint8 verify_data[] = {0x00, 0x01, 0x02, 0x00, 0x01, 0x02, 0x00, 0x01, 0x02,
- 0x03, 0x04, 0x05, 0x03, 0x04, 0x05, 0x03, 0x04, 0x05};
- InterleavedFill(2, 3, 3, data, &render_byte);
- EXPECT_EQ(memcmp(verify_data, data, 9), 0);
- EXPECT_EQ(memcmp(data + 18, data + 19, 1024), 0);
-
- for (size_t frames_per_channel = 1; frames_per_channel < 301;
- frames_per_channel += 3) {
- for (int channels = 1; channels < 5; ++channels) {
- for (int bytes_per_sample = 1; bytes_per_sample < 5; ++bytes_per_sample) {
- render_byte = consume_byte = 0;
- InterleavedFill(frames_per_channel, channels, bytes_per_sample, data,
- &render_byte);
- InterleavedVerify(frames_per_channel, channels, bytes_per_sample, data,
- &consume_byte);
- ASSERT_EQ(render_byte, consume_byte);
- }
- }
- }
-}
-
-TEST_F(ShellAudioSinkTest, Initialize) {
- const uint32 initial_rebuffering_frames_per_channel = 2048;
- const uint32 sink_buffer_size_in_frames_per_channel = 8192;
-
- // 2 configurations with different interleaved
- for (int i = 0; i < 2; ++i) {
- for (media::ChannelLayout layout = media::CHANNEL_LAYOUT_MONO;
- layout != media::CHANNEL_LAYOUT_MAX;
- layout = static_cast<media::ChannelLayout>(layout + 1)) {
- for (int bytes_per_sample = 2; bytes_per_sample < 5;
- bytes_per_sample *= 2) {
- Config config(i == 0 ? Config::INTERLEAVED : Config::PLANAR,
- initial_rebuffering_frames_per_channel,
- sink_buffer_size_in_frames_per_channel,
- kMaxHardwareChannelsStereo, bytes_per_sample,
- 48000 /* output_sample_rate */);
-
- Configure(config);
- EXPECT_TRUE(!sink_->GetAudioBus());
- media::AudioParameters init_params = media::AudioParameters(
- media::AudioParameters::AUDIO_PCM_LOW_LATENCY, layout, 48000,
- bytes_per_sample * 8, 1024);
- sink_->Initialize(init_params, &render_callback_);
-
- EXPECT_TRUE(sink_->PauseRequested());
- EXPECT_TRUE(sink_->GetAudioBus());
-
- media::AudioBus* audio_bus = sink_->GetAudioBus();
- media::AudioParameters params = sink_->GetAudioParameters();
- int expected_channels = init_params.channels();
- if (config.interleaved()) {
- EXPECT_EQ(audio_bus->channels(), 1);
- EXPECT_EQ(audio_bus->frames(),
- config.sink_buffer_size_in_frames_per_channel() *
- expected_channels * bytes_per_sample / sizeof(float));
- EXPECT_EQ(params.channels(), init_params.channels());
- } else {
- EXPECT_EQ(audio_bus->channels(), expected_channels);
- EXPECT_EQ(audio_bus->frames(),
- config.sink_buffer_size_in_frames_per_channel() *
- bytes_per_sample / sizeof(float));
- EXPECT_EQ(params.channels(), init_params.channels());
- }
-
- EXPECT_EQ(params.bits_per_sample(), init_params.bits_per_sample());
- EXPECT_EQ(params.sample_rate(), init_params.sample_rate());
- }
- }
- }
-}
-
-TEST_F(ShellAudioSinkTest, StartAndStop) {
- const uint32 initial_rebuffering_frames_per_channel = 2048;
- const uint32 sink_buffer_size_in_frames_per_channel = 8192;
-
- Config config(
- Config::INTERLEAVED, initial_rebuffering_frames_per_channel,
- sink_buffer_size_in_frames_per_channel, kMaxHardwareChannelsStereo,
- sizeof(int16_t) /* bytes_per_sample */, 48000 /* output_sample_rate */);
-
- media::AudioParameters init_params =
- media::AudioParameters(media::AudioParameters::AUDIO_PCM_LOW_LATENCY,
- media::CHANNEL_LAYOUT_MONO, 48000, 16, 1024);
-
- Configure(config);
-
- sink_->Initialize(init_params, &render_callback_);
-
- InSequence seq;
- EXPECT_CALL(streamer_, HasStream(sink_.get())).WillRepeatedly(Return(false));
- EXPECT_CALL(streamer_, AddStream(sink_.get())).WillOnce(Return(true));
- EXPECT_CALL(streamer_, HasStream(sink_.get())).WillRepeatedly(Return(true));
- sink_->Start();
- EXPECT_TRUE(sink_->PauseRequested());
- EXPECT_CALL(streamer_, RemoveStream(sink_.get())).WillOnce(Return());
- EXPECT_CALL(streamer_, HasStream(sink_.get())).WillRepeatedly(Return(false));
- sink_->Stop();
- EXPECT_TRUE(sink_->PauseRequested());
-}
-
-TEST_F(ShellAudioSinkTest, RenderNoFrames) {
- const uint32 initial_rebuffering_frames_per_channel = 2048;
- const uint32 sink_buffer_size_in_frames_per_channel = 8192;
-
- Config config(
- Config::INTERLEAVED, initial_rebuffering_frames_per_channel,
- sink_buffer_size_in_frames_per_channel, kMaxHardwareChannelsStereo,
- sizeof(int16_t) /* bytes_per_sample */, 48000 /* output_sample_rate */);
-
- Configure(config);
-
- media::AudioParameters init_params =
- media::AudioParameters(media::AudioParameters::AUDIO_PCM_LOW_LATENCY,
- media::CHANNEL_LAYOUT_MONO, 48000, 16, 1024);
-
- sink_->Initialize(init_params, &render_callback_);
-
- InSequence seq;
- EXPECT_CALL(streamer_, HasStream(sink_.get())).WillRepeatedly(Return(false));
- EXPECT_CALL(streamer_, AddStream(sink_.get())).WillOnce(Return(true));
- EXPECT_CALL(streamer_, HasStream(sink_.get())).WillRepeatedly(Return(true));
- sink_->Start();
-
- EXPECT_FALSE(sink_->PullFrames(NULL, NULL));
- uint32_t offset_in_frame, total_frames;
- EXPECT_FALSE(sink_->PullFrames(&offset_in_frame, &total_frames));
-
- EXPECT_CALL(streamer_, RemoveStream(sink_.get())).WillOnce(Return());
- EXPECT_CALL(streamer_, HasStream(sink_.get())).WillRepeatedly(Return(false));
- sink_->Stop();
-}
-
-TEST_F(ShellAudioSinkTest, RenderFrames) {
- const uint32 initial_rebuffering_frames_per_channel = 2048;
- const uint32 sink_buffer_size_in_frames_per_channel = 8192;
-
- for (int i = 0; i < 2; ++i) {
- Config config(
- i == 0 ? Config::INTERLEAVED : Config::PLANAR,
- initial_rebuffering_frames_per_channel,
- sink_buffer_size_in_frames_per_channel, kMaxHardwareChannelsStereo,
- sizeof(int16_t) /* bytes_per_sample */, 48000 /* output_sample_rate */);
-
- Configure(config);
-
- media::AudioParameters init_params =
- media::AudioParameters(media::AudioParameters::AUDIO_PCM_LOW_LATENCY,
- media::CHANNEL_LAYOUT_MONO, 48000, 16, 1024);
-
- sink_->Initialize(init_params, &render_callback_);
-
- InSequence seq;
- EXPECT_CALL(streamer_, HasStream(sink_.get()))
- .WillRepeatedly(Return(false));
- EXPECT_CALL(streamer_, AddStream(sink_.get())).WillOnce(Return(true));
- EXPECT_CALL(streamer_, HasStream(sink_.get())).WillRepeatedly(Return(true));
- sink_->Start();
-
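- // Render in two batches (16 then 8 frames); |offset_in_frame| marks where
- // un-consumed data begins in the sink buffer and |total_frames| counts the
- // frames that have been rendered but not yet consumed.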
- EXPECT_CALL(render_callback_, Render(_, _))
- .WillOnce(RenderAudioBus(16, this));
- EXPECT_FALSE(sink_->PullFrames(NULL, NULL));
-
- uint32_t offset_in_frame, total_frames;
- EXPECT_CALL(render_callback_, Render(_, _)).WillOnce(Return(0));
- EXPECT_FALSE(sink_->PullFrames(&offset_in_frame, &total_frames));
- EXPECT_EQ(offset_in_frame, 0);
- EXPECT_EQ(total_frames, 16);
-
- EXPECT_CALL(render_callback_, Render(_, _))
- .WillOnce(RenderAudioBus(8, this));
- EXPECT_FALSE(sink_->PullFrames(&offset_in_frame, &total_frames));
- EXPECT_EQ(offset_in_frame, 0);
- EXPECT_EQ(total_frames, 24);
-
- Consume(8);
-
- EXPECT_CALL(render_callback_, Render(_, _)).WillOnce(Return(0));
- EXPECT_FALSE(sink_->PullFrames(&offset_in_frame, &total_frames));
- EXPECT_EQ(offset_in_frame, 8);
- EXPECT_EQ(total_frames, 16);
-
- Consume(16);
-
- EXPECT_TRUE(AllConsumed());
-
- EXPECT_CALL(streamer_, HasStream(sink_.get())).WillRepeatedly(Return(true));
- EXPECT_CALL(streamer_, RemoveStream(sink_.get())).WillOnce(Return());
- EXPECT_CALL(streamer_, HasStream(sink_.get()))
- .WillRepeatedly(Return(false));
- sink_->Stop();
- }
-}
-
-TEST_F(ShellAudioSinkTest, RenderRequestSizeAkaAudioBusFrames) {
- const uint32 initial_rebuffering_frames_per_channel = 2048;
- const uint32 sink_buffer_size_in_frames_per_channel = 2048;
-
- for (int i = 0; i < 2; ++i) {
- Config config(
- i == 0 ? Config::INTERLEAVED : Config::PLANAR,
- initial_rebuffering_frames_per_channel,
- sink_buffer_size_in_frames_per_channel, kMaxHardwareChannelsStereo,
- sizeof(int16_t) /* bytes_per_sample */, 48000 /* output_sample_rate */);
- Configure(config);
-
- media::AudioParameters init_params =
- media::AudioParameters(media::AudioParameters::AUDIO_PCM_LOW_LATENCY,
- media::CHANNEL_LAYOUT_MONO, 48000, 16, 1024);
-
- sink_->Initialize(init_params, &render_callback_);
-
- InSequence seq;
- EXPECT_CALL(streamer_, HasStream(sink_.get()))
- .WillRepeatedly(Return(false));
- EXPECT_CALL(streamer_, AddStream(sink_.get())).WillOnce(Return(true));
- EXPECT_CALL(streamer_, HasStream(sink_.get())).WillRepeatedly(Return(true));
- sink_->Start();
-
- for (int i = 0; i < 10; ++i) {
- // Try to get 1024 frames but don't give it any data
- EXPECT_CALL(render_callback_, Render(_, _))
- .WillOnce(VerifyAudioBusFrameCount(config, init_params,
- kFramesPerAccessUnit));
- EXPECT_FALSE(sink_->PullFrames(NULL, NULL));
-
- // Ok, now give it 1024 frames
- EXPECT_CALL(render_callback_, Render(_, _))
- .WillOnce(RenderAudioBus(1024, this));
- EXPECT_FALSE(sink_->PullFrames(NULL, NULL));
-
- // Try to get another 1024 frames but don't give it any data
- EXPECT_CALL(render_callback_, Render(_, _))
- .WillOnce(VerifyAudioBusFrameCount(config, init_params,
- kFramesPerAccessUnit));
- EXPECT_FALSE(sink_->PullFrames(NULL, NULL));
-
- // Ok, now give it 480 frames
- EXPECT_CALL(render_callback_, Render(_, _))
- .WillOnce(RenderAudioBus(480, this));
- EXPECT_FALSE(sink_->PullFrames(NULL, NULL));
-
- // Consume 1024 frames, leaving 1568 frames of space, of which only 544
- // are contiguous
- Consume(1024);
-
- // It still only has room for 544 contiguous frames; don't give it any
- EXPECT_CALL(render_callback_, Render(_, _))
- .WillOnce(VerifyAudioBusFrameCount(config, init_params, 544));
- EXPECT_FALSE(sink_->PullFrames(NULL, NULL));
-
- // Ok, now give it 544 frames
- EXPECT_CALL(render_callback_, Render(_, _))
- .WillOnce(RenderAudioBus(544, this));
- EXPECT_FALSE(sink_->PullFrames(NULL, NULL));
-
- // Now it has room for another 1024 frames; don't give it any
- EXPECT_CALL(render_callback_, Render(_, _))
- .WillOnce(VerifyAudioBusFrameCount(config, init_params, 1024));
- EXPECT_FALSE(sink_->PullFrames(NULL, NULL));
-
- // Ok, now give it 1024 frames
- EXPECT_CALL(render_callback_, Render(_, _))
- .WillOnce(RenderAudioBus(1024, this));
- EXPECT_FALSE(sink_->PullFrames(NULL, NULL));
-
- // Consume 2048 frames
- Consume(2048);
-
- // Give it another 1024 and then consume 1024 to ensure we enter the
- // next iteration of the loop with an empty buffer
- EXPECT_CALL(render_callback_, Render(_, _))
- .WillOnce(RenderAudioBus(1024, this));
- EXPECT_FALSE(sink_->PullFrames(NULL, NULL));
- Consume(1024);
-
- EXPECT_TRUE(AllConsumed());
- }
-
- EXPECT_CALL(streamer_, HasStream(sink_.get())).WillRepeatedly(Return(true));
- EXPECT_CALL(streamer_, RemoveStream(sink_.get())).WillOnce(Return());
- EXPECT_CALL(streamer_, HasStream(sink_.get()))
- .WillRepeatedly(Return(false));
- sink_->Stop();
- }
-}
-
-TEST_F(ShellAudioSinkTest, ResumeAfterUnderflow) {
- const uint32 initial_rebuffering_frames_per_channel = 1024;
- const uint32 sink_buffer_size_in_frames_per_channel = 2048;
-
- for (int i = 0; i < 2; ++i) {
- Config config(
- i == 0 ? Config::INTERLEAVED : Config::PLANAR,
- initial_rebuffering_frames_per_channel,
- sink_buffer_size_in_frames_per_channel, kMaxHardwareChannelsStereo,
- sizeof(int16_t) /* bytes_per_sample */, 48000 /* output_sample_rate */);
- Configure(config);
-
- media::AudioParameters init_params =
- media::AudioParameters(media::AudioParameters::AUDIO_PCM_LOW_LATENCY,
- media::CHANNEL_LAYOUT_MONO, 48000, 16, 1024);
-
- sink_->Initialize(init_params, &render_callback_);
-
- InSequence seq;
- EXPECT_CALL(streamer_, HasStream(sink_.get()))
- .WillRepeatedly(Return(false));
- EXPECT_CALL(streamer_, AddStream(sink_.get())).WillOnce(Return(true));
- EXPECT_CALL(streamer_, HasStream(sink_.get())).WillRepeatedly(Return(true));
- sink_->Start();
-
- // Render 64 frames
- EXPECT_CALL(render_callback_, Render(_, _))
- .WillOnce(RenderAudioBus(64, this));
- EXPECT_FALSE(sink_->PullFrames(NULL, NULL));
-
- // Render another 64 frames
- EXPECT_CALL(render_callback_, Render(_, _))
- .WillOnce(RenderAudioBus(64, this));
- EXPECT_FALSE(sink_->PullFrames(NULL, NULL));
-
- // Consume 112 frames, leaving 16 frames in the buffer
- Consume(112);
-
- // Render another 16 frames
- EXPECT_CALL(render_callback_, Render(_, _))
- .WillOnce(RenderAudioBus(16, this));
- EXPECT_FALSE(sink_->PullFrames(NULL, NULL));
-
- Consume(32);
-
- EXPECT_TRUE(AllConsumed());
-
- EXPECT_CALL(streamer_, HasStream(sink_.get())).WillRepeatedly(Return(true));
- EXPECT_CALL(streamer_, RemoveStream(sink_.get())).WillOnce(Return());
- EXPECT_CALL(streamer_, HasStream(sink_.get()))
- .WillRepeatedly(Return(false));
- sink_->Stop();
- }
-}
-
-} // namespace
diff --git a/src/media/audio/shell_audio_streamer.h b/src/media/audio/shell_audio_streamer.h
deleted file mode 100644
index ca529b3..0000000
--- a/src/media/audio/shell_audio_streamer.h
+++ /dev/null
@@ -1,185 +0,0 @@
-/*
- * Copyright 2013 Google Inc. All Rights Reserved.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef MEDIA_AUDIO_SHELL_AUDIO_STREAMER_H_
-#define MEDIA_AUDIO_SHELL_AUDIO_STREAMER_H_
-
-#include <string>
-#include <vector>
-
-#include "base/basictypes.h"
-#include "base/compiler_specific.h"
-#include "base/logging.h"
-#include "media/mp4/aac.h"
-
-namespace media {
-
-class AudioBus;
-class AudioParameters;
-
-// Abstract class for adding an audio stream to the audio streamer.
-// Your class can implement this interface and then call AddStream(this) to
-// attach itself to the hardware audio streamer.
-class ShellAudioStream {
- public:
- // Checks if "Pause" has been requested on this stream. The streamer will
- // halt playback the next time it updates.
- virtual bool PauseRequested() const = 0;
- // This function serves several purposes:
- // 1. Once the audio stream is added to the streamer, this function will be
- // called periodically so the stream (the AudioSink) can pull data from
- // the upper level, even when it is paused.
- // 2. It returns true to indicate that the stream is playing, false to pause.
- // The frames referred to in this function are PCM frames, not AAC frames.
- // A PCM frame is a group of samples that start at the same timestamp, one
- // from each channel of a multi-channel audio stream.
- // NOTE: This function can be called on a low level audio mixer thread and
- // is LATENCY-SENSITIVE. Avoid locks and other high-latency operations!
- virtual bool PullFrames(uint32_t* offset_in_frame,
- uint32_t* total_frames) = 0;
- // This function tells the stream that `frame_played` audio frames have
- // been played and can be removed from the buffer. The stream can also use
- // this to calculate the elapsed time. The stream shouldn't pull any data
- // in this function; PullFrames() is the only place to pull data.
- virtual void ConsumeFrames(uint32_t frame_played) = 0;
- // Get the AudioParameters for this stream
- virtual const AudioParameters& GetAudioParameters() const = 0;
- // Get the internal buffer of this audio stream as an AudioBus.
- virtual AudioBus* GetAudioBus() = 0;
-};
-
-// This class declares the interface for platform-specific audio playback.
-// Classes derived from it have to implement all the pure virtual functions
-// and provide implementations for the static functions.
-class ShellAudioStreamer {
- public:
- class Config {
- public:
- static const uint32 kInvalidSampleRate = 0;
-
- enum StorageMode { INTERLEAVED, PLANAR };
-
- Config() : valid_(false) {}
-
- // Initialize the Config settings; see the comments on the individual
- // members below for more details.
- Config(StorageMode storage_mode,
- uint32 initial_rebuffering_frames_per_channel,
- uint32 sink_buffer_size_in_frames_per_channel,
- uint32 max_hardware_channels,
- uint32 bytes_per_sample,
- uint32 native_output_sample_rate = kInvalidSampleRate)
- : valid_(true),
- interleaved_(storage_mode == INTERLEAVED),
- initial_rebuffering_frames_per_channel_(
- initial_rebuffering_frames_per_channel),
- sink_buffer_size_in_frames_per_channel_(
- sink_buffer_size_in_frames_per_channel),
- max_hardware_channels_(max_hardware_channels),
- bytes_per_sample_(bytes_per_sample),
- native_output_sample_rate_(native_output_sample_rate) {
- const size_t kFramesPerAccessUnit = mp4::AAC::kFramesPerAccessUnit;
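- // The rebuffering size must not exceed the sink buffer size, and both
- // must be whole multiples of an AAC access unit.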
- DCHECK_LE(initial_rebuffering_frames_per_channel,
- sink_buffer_size_in_frames_per_channel);
- DCHECK_EQ(initial_rebuffering_frames_per_channel % kFramesPerAccessUnit,
- 0);
- DCHECK_EQ(sink_buffer_size_in_frames_per_channel % kFramesPerAccessUnit,
- 0);
- }
-
- bool interleaved() const {
- AssertValid();
- return interleaved_;
- }
- uint32 initial_rebuffering_frames_per_channel() const {
- AssertValid();
- return initial_rebuffering_frames_per_channel_;
- }
- uint32 sink_buffer_size_in_frames_per_channel() const {
- AssertValid();
- return sink_buffer_size_in_frames_per_channel_;
- }
- uint32 max_hardware_channels() const {
- AssertValid();
- return max_hardware_channels_;
- }
- uint32 bytes_per_sample() const {
- AssertValid();
- return bytes_per_sample_;
- }
- uint32 native_output_sample_rate() const {
- AssertValid();
- return native_output_sample_rate_;
- }
-
- private:
- void AssertValid() const { DCHECK(valid_); }
-
- bool valid_;
-
- // Whether the data in the audio bus is interleaved and stored as one channel.
- bool interleaved_;
- // The following parameter controls the sink rebuffering.
- // See ShellAudioSink::ResumeAfterUnderflow for more details.
- uint32 initial_rebuffering_frames_per_channel_;
- uint32 sink_buffer_size_in_frames_per_channel_;
- // Max channels the current audio hardware can render. This can change while
- // the application is running, as the user can plug/unplug different devices,
- // so it represents the status at the time of the query.
- uint32 max_hardware_channels_;
- uint32 bytes_per_sample_;
- uint32 native_output_sample_rate_;
- };
-
- struct OutputDevice {
- std::string connector;
- uint32 latency_ms;
- std::string coding_type;
- uint32 number_of_channels;
- uint32 sampling_frequency;
- };
-
- ShellAudioStreamer() {}
- virtual ~ShellAudioStreamer() {}
-
- // The only instance of the platform-specific audio streamer. It becomes
- // valid after calling Initialize() and becomes NULL after calling Terminate().
- static ShellAudioStreamer* Instance();
- static void Initialize();
- static void Terminate();
-
- virtual Config GetConfig() const = 0;
- virtual bool AddStream(ShellAudioStream* stream) = 0;
- virtual void RemoveStream(ShellAudioStream* stream) = 0;
- virtual bool HasStream(ShellAudioStream* stream) const = 0;
- virtual bool SetVolume(ShellAudioStream* stream, double volume) = 0;
- // Some consoles have background music tracks playing even when other apps
- // are running. This function can be used to stop the background music.
- virtual void StopBackgroundMusic() {}
- // Returns the available audio output devices. This function is for
- // informational purposes and is currently only used to create
- // h5vcc::AudioConfig.
- virtual std::vector<OutputDevice> GetOutputDevices() {
- return std::vector<OutputDevice>();
- }
-
- DISALLOW_COPY_AND_ASSIGN(ShellAudioStreamer);
-};
-
-} // namespace media
-
-#endif // MEDIA_AUDIO_SHELL_AUDIO_STREAMER_H_
diff --git a/src/media/audio/shell_audio_streamer_linux.cc b/src/media/audio/shell_audio_streamer_linux.cc
deleted file mode 100644
index cd35b97..0000000
--- a/src/media/audio/shell_audio_streamer_linux.cc
+++ /dev/null
@@ -1,227 +0,0 @@
-/*
- * Copyright 2013 Google Inc. All Rights Reserved.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "media/audio/shell_audio_streamer_linux.h"
-
-#include "base/logging.h"
-#include "lb_platform.h"
-#include "media/audio/audio_parameters.h"
-#include "media/audio/shell_pulse_audio.h"
-#include "media/base/audio_bus.h"
-#include "media/mp4/aac.h"
-
-namespace media {
-
-namespace {
-
-ShellAudioStreamerLinux* instance = NULL;
-
-} // namespace
-
-class PulseAudioHost : public ShellPulseAudioStream::Host {
- public:
- PulseAudioHost(ShellPulseAudioContext* pulse_audio_context,
- ShellAudioStream* stream,
- int rate,
- int channels);
- ~PulseAudioHost();
- virtual void RequestFrame(size_t length, WriteFunc write) OVERRIDE;
-
- private:
- enum StreamState {
- STATE_INVALID,
- STATE_PAUSED, // Voice is paused, will play when unpaused
- STATE_RUNNING, // Voice is playing, reading new data when possible
- };
-
- ShellPulseAudioContext* pulse_audio_context_;
- int channels_;
- uint32 played_frames_; // frames played by the audio driver
- uint32 written_frames_; // frames written to the audio driver
- StreamState state_;
- ShellAudioStream* lb_audio_stream_;
- ShellPulseAudioStream* pulse_audio_stream_;
-};
-
-ShellAudioStreamer::Config ShellAudioStreamerLinux::GetConfig() const {
- const uint32 initial_rebuffering_frames_per_channel =
- mp4::AAC::kFramesPerAccessUnit * 32;
- const uint32 sink_buffer_size_in_frames_per_channel =
- initial_rebuffering_frames_per_channel * 8;
- const uint32 max_hardware_channels = 2;
-
- return Config(Config::INTERLEAVED, initial_rebuffering_frames_per_channel,
- sink_buffer_size_in_frames_per_channel, max_hardware_channels,
- sizeof(float) /* bytes_per_sample */);
-}
-
-bool ShellAudioStreamerLinux::AddStream(ShellAudioStream* stream) {
- base::AutoLock lock(streams_lock_);
-
- if (pulse_audio_context_ == NULL) {
- pulse_audio_context_.reset(new ShellPulseAudioContext());
- bool result = pulse_audio_context_->Initialize();
- if (!result) {
- pulse_audio_context_.reset();
- DLOG(WARNING) << "Failed to initialize pulse audio.";
- return false;
- }
- }
-
- // Other basic checks. It is assumed that the decoder or renderer algorithm
- // will have rejected invalid configurations before creating a sink, so
- // these are asserts instead of run-time errors.
- const AudioParameters& params = stream->GetAudioParameters();
- DCHECK(params.channels() == 1 || params.channels() == 2);
- DCHECK_EQ(params.bits_per_sample(), 32);
-
- const AudioParameters& audio_parameters = stream->GetAudioParameters();
- const int sample_rate = audio_parameters.sample_rate();
-
- streams_[stream] = new PulseAudioHost(pulse_audio_context_.get(), stream,
- sample_rate, params.channels());
-
- return true;
-}
-
-void ShellAudioStreamerLinux::RemoveStream(ShellAudioStream* stream) {
- base::AutoLock lock(streams_lock_);
-
- StreamMap::iterator it = streams_.find(stream);
- if (it == streams_.end())
- return;
- delete it->second;
- streams_.erase(it);
-
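- // Tear down the PulseAudio context once the last stream has been removed.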
- if (streams_.empty()) {
- pulse_audio_context_.reset();
- }
-}
-
-bool ShellAudioStreamerLinux::HasStream(ShellAudioStream* stream) const {
- base::AutoLock lock(streams_lock_);
- return streams_.find(stream) != streams_.end();
-}
-
-bool ShellAudioStreamerLinux::SetVolume(ShellAudioStream* stream,
- double volume) {
- // Only the default volume of 1.0 is supported.
- if (volume != 1.0) {
- NOTIMPLEMENTED();
- }
- return volume == 1.0;
-}
-
-ShellAudioStreamerLinux::ShellAudioStreamerLinux()
- : streams_value_deleter_(&streams_) {
- instance = this;
-}
-
-ShellAudioStreamerLinux::~ShellAudioStreamerLinux() {
- DCHECK(streams_.empty());
- instance = NULL;
-}
-
-PulseAudioHost::PulseAudioHost(ShellPulseAudioContext* pulse_audio_context,
- ShellAudioStream* stream,
- int rate,
- int channels)
- : channels_(channels),
- pulse_audio_context_(pulse_audio_context),
- played_frames_(0),
- written_frames_(0),
- state_(STATE_PAUSED),
- lb_audio_stream_(stream) {
- pulse_audio_stream_ = pulse_audio_context->CreateStream(this, rate, channels);
-}
-
-PulseAudioHost::~PulseAudioHost() {
- if (pulse_audio_stream_) {
- pulse_audio_context_->DestroyStream(pulse_audio_stream_);
- }
-}
-
-void PulseAudioHost::RequestFrame(size_t length, WriteFunc write) {
- uint64 time_played = pulse_audio_stream_->GetPlaybackCursorInMicroSeconds();
- int sample_rate = lb_audio_stream_->GetAudioParameters().sample_rate();
- uint64 frames_played = time_played * sample_rate / 1000000;
- uint32 frame_consumed = 0;
- uint32 frame_pulled;
-
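- // Convert the playback cursor into a count of frames consumed since the
- // last request, clamping so we never report more than was actually written.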
- if (frames_played > written_frames_)
- frames_played = written_frames_;
- if (frames_played > played_frames_)
- frame_consumed = frames_played - played_frames_;
- played_frames_ += frame_consumed;
-
- // Our samples are in floats.
- const int kBytesPerFrame = sizeof(float) * channels_;
- DCHECK_EQ(length % kBytesPerFrame, 0);
- length /= kBytesPerFrame;
- const AudioBus* audio_bus = lb_audio_stream_->GetAudioBus();
-
- lb_audio_stream_->ConsumeFrames(frame_consumed);
- lb_audio_stream_->PullFrames(NULL, &frame_pulled);
-
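- // If the stream has buffered frames beyond what has already been written to
- // PulseAudio, write as many as fit this request, stopping at the end of the
- // sink's buffer so the wrap-around is handled on the next request.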
- if (played_frames_ + frame_pulled > written_frames_ && length) {
- frame_pulled = played_frames_ + frame_pulled - written_frames_;
- frame_pulled = std::min<size_t>(frame_pulled, length);
-
- uint32 frames = audio_bus->frames() / channels_;
- uint32 frame_offset = written_frames_ % frames;
-
- uint32 frame_to_write =
- std::min<size_t>(frame_pulled, frames - frame_offset);
- const float* buffer = audio_bus->channel(0) + frame_offset * channels_;
- write.Run(reinterpret_cast<const uint8*>(buffer),
- frame_to_write * kBytesPerFrame);
- written_frames_ += frame_to_write;
- }
-
- switch (state_) {
- case STATE_PAUSED:
- if (!lb_audio_stream_->PauseRequested()) {
- pulse_audio_stream_->Play();
- state_ = STATE_RUNNING;
- }
- break;
- case STATE_RUNNING:
- if (lb_audio_stream_->PauseRequested()) {
- pulse_audio_stream_->Pause();
- state_ = STATE_PAUSED;
- }
- break;
- case STATE_INVALID:
- break;
- }
-}
-
-void ShellAudioStreamer::Initialize() {
- CHECK(!instance);
- new ShellAudioStreamerLinux();
-}
-
-void ShellAudioStreamer::Terminate() {
- CHECK(instance);
- delete instance;
- instance = NULL;
-}
-
-ShellAudioStreamer* ShellAudioStreamer::Instance() {
- CHECK(instance);
- return instance;
-}
-
-} // namespace media
diff --git a/src/media/audio/shell_audio_streamer_linux.h b/src/media/audio/shell_audio_streamer_linux.h
deleted file mode 100644
index 06d9e38..0000000
--- a/src/media/audio/shell_audio_streamer_linux.h
+++ /dev/null
@@ -1,55 +0,0 @@
-/*
- * Copyright 2013 Google Inc. All Rights Reserved.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef MEDIA_AUDIO_SHELL_AUDIO_STREAMER_LINUX_H_
-#define MEDIA_AUDIO_SHELL_AUDIO_STREAMER_LINUX_H_
-
-#include <map>
-
-#include "base/memory/scoped_ptr.h"
-#include "base/stl_util.h"
-#include "base/synchronization/lock.h"
-#include "media/audio/shell_audio_streamer.h"
-#include "media/audio/shell_pulse_audio.h"
-
-namespace media {
-
-class PulseAudioHost;
-
-class ShellAudioStreamerLinux : public ShellAudioStreamer {
- public:
- ShellAudioStreamerLinux();
- ~ShellAudioStreamerLinux();
-
- virtual Config GetConfig() const OVERRIDE;
- virtual bool AddStream(ShellAudioStream* stream) OVERRIDE;
- virtual void RemoveStream(ShellAudioStream* stream) OVERRIDE;
- virtual bool HasStream(ShellAudioStream* stream) const OVERRIDE;
- virtual bool SetVolume(ShellAudioStream* stream, double volume) OVERRIDE;
-
- private:
- typedef std::map<ShellAudioStream*, PulseAudioHost*> StreamMap;
- StreamMap streams_;
- scoped_ptr<ShellPulseAudioContext> pulse_audio_context_;
- STLValueDeleter<StreamMap> streams_value_deleter_;
- mutable base::Lock streams_lock_;
-
- DISALLOW_COPY_AND_ASSIGN(ShellAudioStreamerLinux);
-};
-
-} // namespace media
-
-#endif // MEDIA_AUDIO_SHELL_AUDIO_STREAMER_LINUX_H_
diff --git a/src/media/audio/shell_pulse_audio.cc b/src/media/audio/shell_pulse_audio.cc
deleted file mode 100644
index 561a24f..0000000
--- a/src/media/audio/shell_pulse_audio.cc
+++ /dev/null
@@ -1,251 +0,0 @@
-/*
- * Copyright 2014 Google Inc. All Rights Reserved.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-#include "media/audio/shell_pulse_audio.h"
-
-#include "base/bind.h"
-
-namespace media {
-
-ShellPulseAudioStream::ShellPulseAudioStream()
- : context_(NULL),
- latency_(kMinLatency),
- stream_(NULL),
- last_request_size_(0),
- host_(NULL) {
-}
-
-ShellPulseAudioStream::~ShellPulseAudioStream() {
- if (stream_) {
- pa_stream_set_write_callback(stream_, NULL, NULL);
- pa_stream_set_underflow_callback(stream_, NULL, NULL);
- pa_stream_disconnect(stream_);
- pa_stream_unref(stream_);
- }
-}
-
-bool ShellPulseAudioStream::Initialize(ShellPulseAudioContext* context,
- Host* host, int rate, int channel) {
- context_ = context;
- host_ = host;
- sample_spec_.rate = rate;
- sample_spec_.channels = channel;
- sample_spec_.format = PA_SAMPLE_FLOAT32LE;
-
- stream_ = pa_stream_new(context_->GetContext(), "Playback", &sample_spec_,
- NULL);
- if (!stream_)
- return false;
-
- pa_stream_set_write_callback(stream_, RequestCallback, this);
- pa_stream_set_underflow_callback(stream_, UnderflowCallback, this);
- buf_attr_.fragsize = ~0;
- buf_attr_.maxlength = pa_usec_to_bytes(latency_, &sample_spec_);
- buf_attr_.minreq = pa_usec_to_bytes(0, &sample_spec_);
- buf_attr_.prebuf = ~0;
- buf_attr_.tlength = buf_attr_.maxlength;
-
- const pa_stream_flags_t kNoLatency =
- static_cast<pa_stream_flags_t>(PA_STREAM_INTERPOLATE_TIMING |
- PA_STREAM_AUTO_TIMING_UPDATE |
- PA_STREAM_START_CORKED);
- const pa_stream_flags_t kWithLatency =
- static_cast<pa_stream_flags_t>(kNoLatency | PA_STREAM_ADJUST_LATENCY);
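- // Prefer a connection where PulseAudio adjusts its latency to our buffer
- // attributes; fall back to a plain connection if that is not supported.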
- if (pa_stream_connect_playback(
- stream_, NULL, &buf_attr_, kWithLatency, NULL, NULL) >= 0) {
- return true;
- }
-
- // Try again without latency flag.
- if (pa_stream_connect_playback(
- stream_, NULL, &buf_attr_, kNoLatency, NULL, NULL) >= 0) {
- return true;
- }
- pa_stream_unref(stream_);
- stream_ = NULL;
- return false;
-}
-
-bool ShellPulseAudioStream::Play() {
- return Cork(false);
-}
-
-bool ShellPulseAudioStream::Pause() {
- return Cork(true);
-}
-
-uint64 ShellPulseAudioStream::GetPlaybackCursorInMicroSeconds() {
- pa_usec_t usec = 0;
- if (pa_stream_get_time(stream_, &usec) == 0)
- return usec;
- return 0;
-}
-
-void ShellPulseAudioStream::RequestFrame() {
- host_->RequestFrame(
- last_request_size_, base::Bind(&ShellPulseAudioStream::WriteFrame,
- base::Unretained(this)));
-}
-
-void ShellPulseAudioStream::RequestCallback(pa_stream* s, size_t length,
- void* userdata) {
- ShellPulseAudioStream* stream = static_cast<ShellPulseAudioStream*>(userdata);
- stream->HandleRequest(length);
-}
-
-void ShellPulseAudioStream::HandleRequest(size_t length) {
- last_request_size_ = length;
-}
-
-void ShellPulseAudioStream::UnderflowCallback(pa_stream* s, void* userdata) {
- ShellPulseAudioStream* stream = static_cast<ShellPulseAudioStream*>(userdata);
- stream->HandleUnderflow();
-}
-
-void ShellPulseAudioStream::HandleUnderflow() {
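- // On underflow, grow the target latency (up to kMaxLatency) so PulseAudio
- // buffers more data before playback can underrun again.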
- if (latency_ < kMaxLatency) {
- latency_ *= 2;
- if (latency_ > kMaxLatency)
- latency_ = kMaxLatency;
- buf_attr_.maxlength = pa_usec_to_bytes(latency_, &sample_spec_);
- buf_attr_.tlength = buf_attr_.maxlength;
- pa_stream_set_buffer_attr(stream_, &buf_attr_, NULL, NULL);
- }
-}
-
-bool ShellPulseAudioStream::Cork(bool pause) {
- pa_stream_cork(stream_, pause, SuccessCallback, NULL);
- return true;
-}
-
-void ShellPulseAudioStream::SuccessCallback(pa_stream* s, int success,
- void* userdata) {
-}
-
-void ShellPulseAudioStream::WriteFrame(const uint8* data, size_t size) {
- DCHECK_LE(size, last_request_size_);
- if (size != 0)
- pa_stream_write(stream_, data, size, NULL, 0LL, PA_SEEK_RELATIVE);
- last_request_size_ -= size;
-}
-
-ShellPulseAudioContext::ShellPulseAudioContext()
- : mainloop_(NULL),
- context_(NULL),
- pulse_thread_("PulseAudioThread") {
-}
-
-ShellPulseAudioContext::~ShellPulseAudioContext() {
- pulse_thread_.Stop();
- DCHECK(streams_.empty());
- if (context_) {
- pa_context_disconnect(context_);
- pa_context_unref(context_);
- }
- if (mainloop_) {
- pa_mainloop_free(mainloop_);
- }
- if (pulse_thread_.IsRunning()) {
- pulse_thread_.Stop();
- }
-}
-
-bool ShellPulseAudioContext::Initialize() {
- mainloop_ = pa_mainloop_new();
- context_ = pa_context_new(pa_mainloop_get_api(mainloop_), "ShellPulseAudio");
-
- // Set the state callback. It will be called from within pa_mainloop_iterate.
- int pa_ready = kInitial;
- pa_context_set_state_callback(context_, StateCallback, &pa_ready);
-
- // Try to connect to the context, or return on failure.
- if (pa_context_connect(context_, NULL, pa_context_flags_t(), NULL) < 0) {
- DLOG(ERROR) << "Error connecting to context.";
- return false;
- }
-
- // Wait until the context is ready.
- while (pa_ready == kInitial) {
- pa_mainloop_iterate(mainloop_, 1, NULL);
- }
-
- // Clear the state callback.
- pa_context_set_state_callback(context_, NULL, NULL);
-
- if (pa_ready == kReady) {
- base::Thread::Options options;
- options.priority = base::kThreadPriority_RealtimeAudio;
- pulse_thread_.StartWithOptions(options);
- pulse_thread_.message_loop()->PostTask(FROM_HERE,
- base::Bind(&ShellPulseAudioContext::Iterate, base::Unretained(this)));
- }
-
- return pa_ready == kReady;
-}
-
-pa_context* ShellPulseAudioContext::GetContext() {
- return context_;
-}
-
-void ShellPulseAudioContext::Iterate() {
- base::AutoLock lock(lock_);
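- // Re-post this task so the PulseAudio mainloop is pumped roughly every 5 ms
- // on the dedicated audio thread, then service each registered stream.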
- pulse_thread_.message_loop()->PostDelayedTask(
- FROM_HERE, base::Bind(&ShellPulseAudioContext::Iterate,
- base::Unretained(this)),
- base::TimeDelta::FromMilliseconds(5));
- pa_mainloop_iterate(mainloop_, 0, NULL);
-
- for (Streams::iterator iter = streams_.begin();
- iter != streams_.end(); ++iter) {
- (*iter)->RequestFrame();
- }
-}
-
-ShellPulseAudioStream* ShellPulseAudioContext::CreateStream(
- ShellPulseAudioStream::Host* host, int rate, int channel) {
- base::AutoLock lock(lock_);
-
- ShellPulseAudioStream* stream = new ShellPulseAudioStream;
- bool result = stream->Initialize(this, host, rate, channel);
- DCHECK(result);
- streams_.insert(stream);
- return stream;
-}
-
-void ShellPulseAudioContext::DestroyStream(ShellPulseAudioStream* stream) {
- base::AutoLock lock(lock_);
- DCHECK(streams_.find(stream) != streams_.end());
- streams_.erase(streams_.find(stream));
- delete stream;
-}
-
-void ShellPulseAudioContext::StateCallback(pa_context* c, void* userdata) {
- int* pa_ready = static_cast<int*>(userdata);
-
- switch (pa_context_get_state(c)) {
- case PA_CONTEXT_FAILED:
- case PA_CONTEXT_TERMINATED:
- *pa_ready = kError;
- break;
- case PA_CONTEXT_READY:
- *pa_ready = kReady;
- break;
- default:
- break;
- }
-}
-
-} // namespace media
-
diff --git a/src/media/audio/shell_pulse_audio.h b/src/media/audio/shell_pulse_audio.h
deleted file mode 100644
index 6692eba..0000000
--- a/src/media/audio/shell_pulse_audio.h
+++ /dev/null
@@ -1,116 +0,0 @@
-/*
- * Copyright 2014 Google Inc. All Rights Reserved.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-#ifndef MEDIA_AUDIO_SHELL_PULSE_AUDIO_H_
-#define MEDIA_AUDIO_SHELL_PULSE_AUDIO_H_
-
-#include <pulse/pulseaudio.h>
-
-#include <set>
-
-#include "base/synchronization/lock.h"
-#include "base/threading/thread.h"
-
-namespace media {
-
-class ShellPulseAudioContext;
-
-class ShellPulseAudioStream {
- public:
- class Host {
- public:
- typedef base::Callback<void(const uint8*, size_t)> WriteFunc;
- virtual ~Host() {}
- virtual void RequestFrame(size_t length, WriteFunc write) = 0;
- };
-
- ShellPulseAudioStream();
- ~ShellPulseAudioStream();
-
- bool Initialize(ShellPulseAudioContext* context, Host* host, int rate,
- int channel);
- bool Play();
- bool Pause();
- uint64 GetPlaybackCursorInMicroSeconds();
- void RequestFrame();
-
- private:
- enum {
- kInitial = 0,
- kSuccess,
- kFailure
- };
-
- static const int kMinLatency = 1000000;
- static const int kMaxLatency = 4000000;
-
- static void RequestCallback(pa_stream* s, size_t length, void* userdata);
- void HandleRequest(size_t length);
- static void UnderflowCallback(pa_stream* s, void* userdata);
- void HandleUnderflow();
- static void SuccessCallback(pa_stream* s, int success, void* userdata);
- bool Cork(bool pause);
-
- void WriteFrame(const uint8* data, size_t size);
-
- ShellPulseAudioContext* context_;
- int latency_;
- pa_buffer_attr buf_attr_;
- pa_sample_spec sample_spec_;
- pa_stream* stream_;
- size_t last_request_size_;
- Host* host_;
-
- DISALLOW_COPY_AND_ASSIGN(ShellPulseAudioStream);
-};
-
-class ShellPulseAudioContext {
- public:
- ShellPulseAudioContext();
- ~ShellPulseAudioContext();
-
- bool Initialize();
- pa_context* GetContext();
- void Iterate();
-
- ShellPulseAudioStream* CreateStream(ShellPulseAudioStream::Host* host,
- int rate, int channel);
- void DestroyStream(ShellPulseAudioStream* stream);
-
- base::Lock& lock() { return lock_; }
-
- private:
- typedef std::set<ShellPulseAudioStream*> Streams;
- enum {
- kInitial = 0,
- kReady,
- kError
- };
-
- static void StateCallback(pa_context* c, void* userdata);
-
- pa_mainloop* mainloop_;
- pa_context* context_;
- base::Thread pulse_thread_;
-
- Streams streams_;
- base::Lock lock_;
-
- DISALLOW_COPY_AND_ASSIGN(ShellPulseAudioContext);
-};
-
-} // namespace media
-
-#endif // MEDIA_AUDIO_SHELL_PULSE_AUDIO_H_
diff --git a/src/media/audio/shell_wav_test_probe.cc b/src/media/audio/shell_wav_test_probe.cc
deleted file mode 100644
index f4197a1..0000000
--- a/src/media/audio/shell_wav_test_probe.cc
+++ /dev/null
@@ -1,235 +0,0 @@
-/*
- * Copyright 2012 Google Inc. All Rights Reserved.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "media/audio/shell_wav_test_probe.h"
-
-#include <string>
-
-#include "base/file_path.h"
-#include "base/file_util.h"
-#include "base/logging.h"
-#include "base/path_service.h"
-#include "base/platform_file.h"
-#include "media/base/endian_util.h"
-#include "media/filters/shell_demuxer.h"
-
-// don't include me in release builds please
-#if !defined(__LB_SHELL__FOR_RELEASE__)
-
-static const uint16 kWavFormatCodePCM = 0x0001;
-static const uint16 kWavFormatCodeIEEEFloat = 0x0003;
-// "RIFF" in ASCII (big-endian)
-static const uint32 kWav_RIFF = 0x52494646;
-// "WAVE" in ASCII (big-endian)
-static const uint32 kWav_WAVE = 0x57415645;
-// "fmt " in ASCII (big-endian)
-static const uint32 kWav_fmt = 0x666d7420;
-// "data" in ASCII (big-endian)
-static const uint32 kWav_data = 0x64617461;
-
-namespace media {
-
-ShellWavTestProbe::ShellWavTestProbe()
- : wav_file_(NULL),
- form_wav_length_bytes_(kWavTotalHeaderLength - 8),
- format_code_(0),
- channels_(0),
- samples_per_second_(0),
- bits_per_sample_(0),
- bytes_per_frame_(0),
- closed_(true),
- close_after_ms_(0) {}
-
-void ShellWavTestProbe::Initialize(const char* file_name,
- int channel_count,
- int samples_per_second,
- int bits_per_sample,
- bool use_floats) {
- // try to open file first
- FilePath base_path;
- bool path_ok = PathService::Get(base::DIR_EXE, &base_path);
- DCHECK(path_ok);
- base_path = base_path.Append(file_name);
- wav_file_ = base::CreatePlatformFile(
- base_path, base::PLATFORM_FILE_CREATE_ALWAYS | base::PLATFORM_FILE_WRITE,
- NULL, NULL);
- DCHECK_NE(wav_file_, base::kInvalidPlatformFileValue);
- closed_ = false;
-
- if (use_floats)
- format_code_ = kWavFormatCodeIEEEFloat;
- else
- format_code_ = kWavFormatCodePCM;
-
- channels_ = channel_count;
-
- bits_per_sample_ = (uint16)bits_per_sample;
- samples_per_second_ = samples_per_second;
-
- bytes_per_frame_ = (bits_per_sample_ / 8) * channels_;
-
- // Write a temporary header. It's incomplete until we know the whole length
- // of the sample stream, but this advances the file pointer to the start of
- // the audio data.
- WriteHeader();
-}
-
-// see: http://www-mmsp.ece.mcgill.ca/Documents/AudioFormats/WAVE/WAVE.html
-void ShellWavTestProbe::WriteHeader() {
- // first four bytes are FORM RIFF header
- endian_util::store_uint32_big_endian(kWav_RIFF, wav_header_buffer_);
- // next four are the length of the file minus the FORM header, uint32 little-endian
- endian_util::store_uint32_little_endian(form_wav_length_bytes_,
- wav_header_buffer_ + 4);
- // then WAVE header
- endian_util::store_uint32_big_endian(kWav_WAVE, wav_header_buffer_ + 8);
- // start common chunk with format "fmt " header
- endian_util::store_uint32_big_endian(kWav_fmt, wav_header_buffer_ + 12);
- // length of format chunk, uint32 little-endian
- endian_util::store_uint32_little_endian(kWavFormatChunkLength - 8,
- wav_header_buffer_ + 16);
- // format code, uint16 little-endian
- endian_util::store_uint16_little_endian(format_code_,
- wav_header_buffer_ + 20);
- // number of channels, uint16 little-endian
- endian_util::store_uint16_little_endian(channels_, wav_header_buffer_ + 22);
- // sample rate, uint32 little-endian
- endian_util::store_uint32_little_endian(samples_per_second_,
- wav_header_buffer_ + 24);
- // average bytes per second, uint32 little-endian, derived
- uint32 bytes_per_second = samples_per_second_ * bytes_per_frame_;
- endian_util::store_uint32_little_endian(bytes_per_second,
- wav_header_buffer_ + 28);
- // "block align", reading as bytes per frame, uint16 little-endian
- endian_util::store_uint16_little_endian(bytes_per_frame_,
- wav_header_buffer_ + 32);
- // bits per sample, uint16 little-endian
- endian_util::store_uint16_little_endian(bits_per_sample_,
- wav_header_buffer_ + 34);
- // size of extension format chunk header, uint16 little-endian
- // always 22 bytes for us so we can support more than 2 channels of audio
- endian_util::store_uint16_little_endian(22, wav_header_buffer_ + 36);
- // valid bits per sample, always same as bits per sample
- endian_util::store_uint16_little_endian(bits_per_sample_,
- wav_header_buffer_ + 38);
- // channel mask, 4 bytes, set to all zeroes to keep default channel layout
- endian_util::store_uint32_little_endian(0, wav_header_buffer_ + 40);
- // subformat guid, 16 bytes, first two bytes are format code again, rest
- // are a magic number 00 00 00 00 10 00 80 00 00 aa 00 38 9b 71
- uint64 magic_msd = ((uint64)format_code_ << 48) | 0x0000000000001000;
- endian_util::store_uint64_big_endian(magic_msd, wav_header_buffer_ + 44);
- endian_util::store_uint64_big_endian(0x800000aa00389b71,
- wav_header_buffer_ + 52);
- // start the data chunk with "data" header
- endian_util::store_uint32_big_endian(kWav_data, wav_header_buffer_ + 60);
- // data chunk size is form wav length minus the rest of the header bytes
- // uint32 little-endian
- uint32 data_chunk_size = form_wav_length_bytes_ - (kWavTotalHeaderLength - 8);
- endian_util::store_uint32_little_endian(data_chunk_size,
- wav_header_buffer_ + 64);
- // alright, the WAV header buffer is current, now we can write it into the file
- // jump to start of file
- int result =
- base::SeekPlatformFile(wav_file_, base::PLATFORM_FILE_FROM_BEGIN, 0);
- // write buffer
- result = base::WritePlatformFileAtCurrentPos(
- wav_file_, reinterpret_cast<const char*>(wav_header_buffer_),
- kWavTotalHeaderLength);
- DCHECK_EQ(result, kWavTotalHeaderLength);
-}
-
-void ShellWavTestProbe::CloseAfter(uint64 milliseconds) {
- close_after_ms_ = milliseconds;
-}
-
-void ShellWavTestProbe::AddData(const uint8* data,
- uint32 length,
- uint64 timestamp) {
-#if defined(__LB_SHELL__BIG_ENDIAN__) || \
- (defined(OS_STARBOARD) && defined(SB_IS_BIG_ENDIAN) && SB_IS_BIG_ENDIAN)
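- // WAV data is little-endian; on big-endian targets byte-swap each sample
- // before writing it to the file.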
- uint8* reverse_buffer = (uint8*)malloc(length);
- uint16 bytes_per_sample = bits_per_sample_ / 8;
- int num_words = length / bytes_per_sample;
- for (int i = 0; i < num_words; i++) {
- uint8* out = reverse_buffer + (i * bytes_per_sample);
- if (bytes_per_sample == 2) {
- endian_util::store_uint16_little_endian(((uint16*)data)[i], out);
- } else if (bytes_per_sample == 4) {
- endian_util::store_uint32_little_endian(((uint32*)data)[i], out);
- } else {
- DLOG(ERROR) << "Failed to add data";
- }
- }
- AddDataLittleEndian(reverse_buffer, length, timestamp);
- free(reverse_buffer);
-#else
- AddDataLittleEndian(data, length, timestamp);
-#endif
-}
-
-void ShellWavTestProbe::AddDataLittleEndian(const uint8* data,
- uint32 length,
- uint64 timestamp) {
- if (closed_)
- return;
- if (!length)
- return;
-
- int result = base::WritePlatformFileAtCurrentPos(
- wav_file_, reinterpret_cast<const char*>(data), length);
- DCHECK_EQ(result, length);
- base::FlushPlatformFile(wav_file_);
-
- // update our counters
- form_wav_length_bytes_ += length;
-
- if (close_after_ms_ > 0) {
- if (timestamp == 0) {
- // guess at timestamp based on total file size
- timestamp = (((uint64)form_wav_length_bytes_ -
- (uint64)(kWavTotalHeaderLength - 8)) *
- 1000ULL) /
- (uint64)(samples_per_second_ * bytes_per_frame_);
- }
- if (timestamp > close_after_ms_) {
- Close();
- }
- }
-}
-
-void ShellWavTestProbe::AddData(const scoped_refptr<Buffer>& buffer) {
- uint64 timestamp = 0;
- if (buffer->GetTimestamp() != kNoTimestamp()) {
- timestamp = buffer->GetTimestamp().InMilliseconds();
- }
- AddData(buffer->GetData(), buffer->GetDataSize(), timestamp);
-}
-
-void ShellWavTestProbe::Close() {
- if (closed_)
- return;
-
- closed_ = true;
- // write the header again now that we know the lengths
- WriteHeader();
- // close the file
- base::ClosePlatformFile(wav_file_);
- wav_file_ = NULL;
-}
-
-} // namespace media
-
-#endif // __LB_SHELL__FOR_RELEASE__
diff --git a/src/media/audio/shell_wav_test_probe.h b/src/media/audio/shell_wav_test_probe.h
deleted file mode 100644
index db37a8d..0000000
--- a/src/media/audio/shell_wav_test_probe.h
+++ /dev/null
@@ -1,78 +0,0 @@
-/*
- * Copyright 2012 Google Inc. All Rights Reserved.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef MEDIA_AUDIO_SHELL_WAV_TEST_PROBE_H_
-#define MEDIA_AUDIO_SHELL_WAV_TEST_PROBE_H_
-
-#include "base/platform_file.h"
-#include "media/base/buffers.h"
-
-// don't include me in release builds please
-#if !defined(__LB_SHELL__FOR_RELEASE__)
-
-static const uint32 kFormWavHeaderLength = 12;
-static const uint32 kWavFormatChunkLength = 48;
-static const uint32 kWavDataChunkHeaderLength = 8;
-static const uint32 kWavTotalHeaderLength =
- kFormWavHeaderLength + kWavFormatChunkLength + kWavDataChunkHeaderLength;
-
-namespace media {
-
-// Utility class for saving decoded audio bytes into a WAV file
-class MEDIA_EXPORT ShellWavTestProbe {
- public:
- ShellWavTestProbe();
- // if use_floats is true then the data is written as floating point;
- // if false it is assumed to be integer PCM
- void Initialize(const char* file_name,
- int channel_count,
- int samples_per_second,
- int bits_per_sample,
- bool use_floats);
- // automatically close the file after the given number of milliseconds of
- // audio has been added; Close() will happen on the first call to AddData()
- // with a timestamp past the argument
- void CloseAfter(uint64 milliseconds);
- void AddData(const scoped_refptr<Buffer>& buffer);
- // timestamp can be zero, in which case we will guess at timestamp based on
- // number of bytes written, size of samples, and sample rate
- void AddData(const uint8* data, uint32 length, uint64 timestamp);
- void AddDataLittleEndian(const uint8* data, uint32 length, uint64 timestamp);
- void Close();
-
- private:
- // take the current state variables below and use them to write the
- // WAV header at the top of the file. Moves the file pointer.
- void WriteHeader();
-
- base::PlatformFile wav_file_;
- // wav header state variables
- uint32 form_wav_length_bytes_;
- uint16 format_code_;
- uint16 channels_;
- uint32 samples_per_second_;
- uint16 bits_per_sample_;
- uint8 wav_header_buffer_[kWavTotalHeaderLength];
- uint32 bytes_per_frame_;
- // other state
- bool closed_;
- uint64 close_after_ms_;
-};
-
-} // namespace media
-
-#endif // __LB_SHELL__FOR_RELEASE__
-
-#endif // MEDIA_AUDIO_SHELL_WAV_TEST_PROBE_H_
diff --git a/src/media/audio/simple_sources.cc b/src/media/audio/simple_sources.cc
deleted file mode 100644
index aeac86d..0000000
--- a/src/media/audio/simple_sources.cc
+++ /dev/null
@@ -1,73 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-// MSVC++ requires this to be set before any other includes to get M_PI.
-#define _USE_MATH_DEFINES
-#include <cmath>
-
-#include "media/audio/simple_sources.h"
-
-#include <algorithm>
-
-#include "base/logging.h"
-#include "media/audio/audio_util.h"
-
-namespace media {
-
-//////////////////////////////////////////////////////////////////////////////
-// SineWaveAudioSource implementation.
-
-SineWaveAudioSource::SineWaveAudioSource(int channels,
- double freq, double sample_freq)
- : channels_(channels),
- f_(freq / sample_freq),
- time_state_(0),
- cap_(0),
- callbacks_(0),
- errors_(0) {
-}
-
-// The implementation could be more efficient if a lookup table is constructed
-// but it is efficient enough for our simple needs.
-int SineWaveAudioSource::OnMoreData(AudioBus* audio_bus,
- AudioBuffersState audio_buffers) {
- base::AutoLock auto_lock(time_lock_);
- callbacks_++;
-
- // The output is filled with s(t) = sin(Theta * t), where Theta = 2 * PI * f_
- // and f_ = freq / sample_freq.
- // We store the discrete time value |t| in a member to ensure that the
- // next pass starts at a correct state.
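- // Honor CapSamples(): once |cap_| samples have been produced, emit nothing
- // further until Reset() is called.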
- int max_frames = cap_ > 0 ?
- std::min(audio_bus->frames(), cap_ - time_state_) : audio_bus->frames();
- for (int i = 0; i < max_frames; ++i)
- audio_bus->channel(0)[i] = sin(2.0 * M_PI * f_ * time_state_++);
- for (int i = 1; i < audio_bus->channels(); ++i) {
- memcpy(audio_bus->channel(i), audio_bus->channel(0),
- max_frames * sizeof(*audio_bus->channel(i)));
- }
- return max_frames;
-}
-
-int SineWaveAudioSource::OnMoreIOData(AudioBus* source,
- AudioBus* dest,
- AudioBuffersState audio_buffers) {
- return OnMoreData(dest, audio_buffers);
-}
-
-void SineWaveAudioSource::OnError(AudioOutputStream* stream, int code) {
- errors_++;
-}
-
-void SineWaveAudioSource::CapSamples(int cap) {
- base::AutoLock auto_lock(time_lock_);
- DCHECK_GT(cap, 0);
- cap_ = cap;
-}
-
-void SineWaveAudioSource::Reset() {
- base::AutoLock auto_lock(time_lock_);
- time_state_ = 0;
-}
-
-} // namespace media
diff --git a/src/media/audio/simple_sources.h b/src/media/audio/simple_sources.h
deleted file mode 100644
index 80bd517..0000000
--- a/src/media/audio/simple_sources.h
+++ /dev/null
@@ -1,53 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef MEDIA_AUDIO_SIMPLE_SOURCES_H_
-#define MEDIA_AUDIO_SIMPLE_SOURCES_H_
-
-#include "base/synchronization/lock.h"
-#include "media/audio/audio_io.h"
-#include "media/base/seekable_buffer.h"
-
-namespace media {
-
-// An audio source that produces a pure sinusoidal tone.
-class MEDIA_EXPORT SineWaveAudioSource
- : public AudioOutputStream::AudioSourceCallback {
- public:
- // |channels| is the number of audio channels, |freq| is the frequency in
- // hertz and it has to be less than half of the sampling frequency
- // |sample_freq| or else you will get aliasing.
- SineWaveAudioSource(int channels, double freq, double sample_freq);
- virtual ~SineWaveAudioSource() {}
-
- // Return up to |cap| samples of data via OnMoreData(). Use Reset() to
- // allow more data to be served.
- void CapSamples(int cap);
- void Reset();
-
- // Implementation of AudioSourceCallback.
- virtual int OnMoreData(AudioBus* audio_bus,
- AudioBuffersState audio_buffers) OVERRIDE;
- virtual int OnMoreIOData(AudioBus* source,
- AudioBus* dest,
- AudioBuffersState audio_buffers) OVERRIDE;
- virtual void OnError(AudioOutputStream* stream, int code) OVERRIDE;
-
- // The number of OnMoreData()+OnMoreIOData() and OnError() calls respectively.
- int callbacks() { return callbacks_; }
- int errors() { return errors_; }
-
- protected:
- int channels_;
- double f_;
- int time_state_;
- int cap_;
- int callbacks_;
- int errors_;
- base::Lock time_lock_;
-};
-
-} // namespace media
-
-#endif // MEDIA_AUDIO_SIMPLE_SOURCES_H_
diff --git a/src/media/audio/simple_sources_unittest.cc b/src/media/audio/simple_sources_unittest.cc
deleted file mode 100644
index cee5d8a..0000000
--- a/src/media/audio/simple_sources_unittest.cc
+++ /dev/null
@@ -1,78 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include <limits>
-
-#include "base/logging.h"
-#include "base/basictypes.h"
-#include "base/memory/scoped_ptr.h"
-#include "media/audio/audio_parameters.h"
-#include "media/audio/simple_sources.h"
-#include "media/base/audio_bus.h"
-#include "testing/gtest/include/gtest/gtest.h"
-
-namespace media {
-
-// Validate that the SineWaveAudioSource writes the expected values.
-TEST(SimpleSources, SineWaveAudioSource) {
- static const uint32 samples = 1024;
- static const uint32 bytes_per_sample = 2;
- static const int freq = 200;
-
- AudioParameters params(
- AudioParameters::AUDIO_PCM_LINEAR, CHANNEL_LAYOUT_MONO,
- AudioParameters::kTelephoneSampleRate, bytes_per_sample * 8, samples);
-
- SineWaveAudioSource source(1, freq, params.sample_rate());
- scoped_ptr<AudioBus> audio_bus = AudioBus::Create(params);
- source.OnMoreData(audio_bus.get(), AudioBuffersState());
- EXPECT_EQ(1, source.callbacks());
- EXPECT_EQ(0, source.errors());
-
- uint32 half_period = AudioParameters::kTelephoneSampleRate / (freq * 2);
-
- // Spot test positive incursion of sine wave.
- EXPECT_NEAR(0, audio_bus->channel(0)[0],
- std::numeric_limits<float>::epsilon());
- EXPECT_FLOAT_EQ(0.15643446f, audio_bus->channel(0)[1]);
- EXPECT_LT(audio_bus->channel(0)[1], audio_bus->channel(0)[2]);
- EXPECT_LT(audio_bus->channel(0)[2], audio_bus->channel(0)[3]);
- // Spot test negative incursion of sine wave.
- EXPECT_NEAR(0, audio_bus->channel(0)[half_period],
- std::numeric_limits<float>::epsilon());
- EXPECT_FLOAT_EQ(-0.15643446f, audio_bus->channel(0)[half_period + 1]);
- EXPECT_GT(audio_bus->channel(0)[half_period + 1],
- audio_bus->channel(0)[half_period + 2]);
- EXPECT_GT(audio_bus->channel(0)[half_period + 2],
- audio_bus->channel(0)[half_period + 3]);
-}
-
-TEST(SimpleSources, SineWaveAudioCapped) {
- SineWaveAudioSource source(1, 200, AudioParameters::kTelephoneSampleRate);
-
- static const int kSampleCap = 100;
- source.CapSamples(kSampleCap);
-
- scoped_ptr<AudioBus> audio_bus = AudioBus::Create(1, 2 * kSampleCap);
- EXPECT_EQ(source.OnMoreData(
- audio_bus.get(), AudioBuffersState()), kSampleCap);
- EXPECT_EQ(1, source.callbacks());
- EXPECT_EQ(source.OnMoreData(audio_bus.get(), AudioBuffersState()), 0);
- EXPECT_EQ(2, source.callbacks());
- source.Reset();
- EXPECT_EQ(source.OnMoreData(
- audio_bus.get(), AudioBuffersState()), kSampleCap);
- EXPECT_EQ(3, source.callbacks());
- EXPECT_EQ(0, source.errors());
-}
-
-TEST(SimpleSources, OnError) {
- SineWaveAudioSource source(1, 200, AudioParameters::kTelephoneSampleRate);
- source.OnError(NULL, 0);
- EXPECT_EQ(1, source.errors());
- source.OnError(NULL, 0);
- EXPECT_EQ(2, source.errors());
-}
-
-} // namespace media
diff --git a/src/media/audio/test_audio_input_controller_factory.cc b/src/media/audio/test_audio_input_controller_factory.cc
deleted file mode 100644
index 64bfb9f..0000000
--- a/src/media/audio/test_audio_input_controller_factory.cc
+++ /dev/null
@@ -1,69 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "media/audio/test_audio_input_controller_factory.h"
-#include "media/audio/audio_io.h"
-
-namespace media {
-
-TestAudioInputController::TestAudioInputController(
- TestAudioInputControllerFactory* factory,
- AudioManager* audio_manager,
- const AudioParameters& audio_parameters,
- EventHandler* event_handler,
- SyncWriter* sync_writer)
- : AudioInputController(event_handler, sync_writer),
- audio_parameters_(audio_parameters),
- factory_(factory),
- event_handler_(event_handler) {
- message_loop_ = audio_manager->GetMessageLoop();
-}
-
-TestAudioInputController::~TestAudioInputController() {
- // Inform the factory so that it allows creating new instances in the future.
- factory_->OnTestAudioInputControllerDestroyed(this);
-}
-
-void TestAudioInputController::Record() {
- if (factory_->delegate_)
- factory_->delegate_->TestAudioControllerOpened(this);
-}
-
-void TestAudioInputController::Close(const base::Closure& closed_task) {
- message_loop_->PostTask(FROM_HERE, closed_task);
- if (factory_->delegate_)
- factory_->delegate_->TestAudioControllerClosed(this);
-}
-
-TestAudioInputControllerFactory::TestAudioInputControllerFactory()
- : controller_(NULL),
- delegate_(NULL) {
-}
-
-TestAudioInputControllerFactory::~TestAudioInputControllerFactory() {
- DCHECK(!controller_);
-}
-
-AudioInputController* TestAudioInputControllerFactory::Create(
- AudioManager* audio_manager,
- AudioInputController::EventHandler* event_handler,
- AudioParameters params) {
- DCHECK(!controller_); // Only one test instance managed at a time.
- controller_ = new TestAudioInputController(this, audio_manager, params,
- event_handler, NULL);
- return controller_;
-}
-
-void TestAudioInputControllerFactory::SetDelegateForTests(
- TestAudioInputControllerDelegate* delegate) {
- delegate_ = delegate;
-}
-
-void TestAudioInputControllerFactory::OnTestAudioInputControllerDestroyed(
- TestAudioInputController* controller) {
- DCHECK_EQ(controller_, controller);
- controller_ = NULL;
-}
-
-} // namespace media
diff --git a/src/media/audio/test_audio_input_controller_factory.h b/src/media/audio/test_audio_input_controller_factory.h
deleted file mode 100644
index 0a17947..0000000
--- a/src/media/audio/test_audio_input_controller_factory.h
+++ /dev/null
@@ -1,121 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef MEDIA_AUDIO_TEST_AUDIO_INPUT_CONTROLLER_FACTORY_H_
-#define MEDIA_AUDIO_TEST_AUDIO_INPUT_CONTROLLER_FACTORY_H_
-
-#include "base/bind.h"
-#include "media/audio/audio_input_controller.h"
-
-namespace media {
-
-class TestAudioInputControllerFactory;
-
-// TestAudioInputController and TestAudioInputControllerFactory are used for
-// testing consumers of AudioInputController. TestAudioInputControllerFactory
-// is an AudioInputController::Factory that creates TestAudioInputControllers.
-//
-// TestAudioInputController::Record and Close are overridden to do nothing. It is
-// expected that you'll grab the EventHandler from the TestAudioInputController
-// and invoke the callback methods when appropriate. In this way it's easy to
-// mock an AudioInputController.
-//
-// Typical usage:
-// // Create and register factory.
-// TestAudioInputControllerFactory factory;
-// AudioInputController::set_factory_for_testing(&factory);
-//
-// // Do something that triggers creation of an AudioInputController.
-// TestAudioInputController* controller = factory.last_controller();
-// DCHECK(controller);
-//
-// // Notify event handler with whatever data you want.
-// controller->event_handler()->OnCreated(...);
-//
-// // Do something that triggers AudioInputController::Record to be called.
-// controller->event_handler()->OnData(...);
-// controller->event_handler()->OnError(...);
-//
-// // Make sure consumer of AudioInputController does the right thing.
-// ...
-// // Reset factory.
-// AudioInputController::set_factory_for_testing(NULL);
-
-class TestAudioInputController : public AudioInputController {
- public:
- class Delegate {
- public:
- virtual void TestAudioControllerOpened(
- TestAudioInputController* controller) = 0;
- virtual void TestAudioControllerClosed(
- TestAudioInputController* controller) = 0;
- };
-
- TestAudioInputController(TestAudioInputControllerFactory* factory,
- AudioManager* audio_manager,
- const AudioParameters& audio_parameters,
- EventHandler* event_handler,
- SyncWriter* sync_writer);
-
- // Returns the event handler installed on the AudioInputController.
- EventHandler* event_handler() const { return event_handler_; }
-
-  // Notifies the delegate (if any) via TestAudioControllerOpened().
- virtual void Record() OVERRIDE;
-
- // Ensure that the closure is run on the audio-manager thread.
- virtual void Close(const base::Closure& closed_task) OVERRIDE;
-
- protected:
- virtual ~TestAudioInputController();
-
- private:
- AudioParameters audio_parameters_;
-
- // These are not owned by us and expected to be valid for this object's
- // lifetime.
- TestAudioInputControllerFactory* factory_;
- EventHandler* event_handler_;
-
- DISALLOW_COPY_AND_ASSIGN(TestAudioInputController);
-};
-
-typedef TestAudioInputController::Delegate TestAudioInputControllerDelegate;
-
-// Simple AudioInputController::Factory method that creates
-// TestAudioInputControllers.
-class TestAudioInputControllerFactory : public AudioInputController::Factory {
- public:
- TestAudioInputControllerFactory();
- virtual ~TestAudioInputControllerFactory();
-
- // AudioInputController::Factory methods.
- virtual AudioInputController* Create(
- AudioManager* audio_manager,
- AudioInputController::EventHandler* event_handler,
- AudioParameters params) OVERRIDE;
-
- void SetDelegateForTests(TestAudioInputControllerDelegate* delegate);
-
- TestAudioInputController* controller() const { return controller_; }
-
- private:
- friend class TestAudioInputController;
-
- // Invoked by a TestAudioInputController when it gets destroyed.
- void OnTestAudioInputControllerDestroyed(
- TestAudioInputController* controller);
-
- // The caller of Create owns this object.
- TestAudioInputController* controller_;
-
- // The delegate for tests for receiving audio controller events.
- TestAudioInputControllerDelegate* delegate_;
-
- DISALLOW_COPY_AND_ASSIGN(TestAudioInputControllerFactory);
-};
-
-} // namespace media
-
-#endif // MEDIA_AUDIO_TEST_AUDIO_INPUT_CONTROLLER_FACTORY_H_
diff --git a/src/media/audio/virtual_audio_input_stream.cc b/src/media/audio/virtual_audio_input_stream.cc
deleted file mode 100644
index 4f2f9bd..0000000
--- a/src/media/audio/virtual_audio_input_stream.cc
+++ /dev/null
@@ -1,173 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "media/audio/virtual_audio_input_stream.h"
-
-#include <algorithm>
-#include <utility>
-
-#include "base/bind.h"
-#include "base/message_loop.h"
-#include "media/audio/virtual_audio_output_stream.h"
-
-namespace media {
-
-// LoopbackAudioConverter works similarly to AudioConverter and converts input
-// streams to different audio parameters. Then, the LoopbackAudioConverter can
-// be used as an input to another AudioConverter. This allows us to
-// use converted audio from AudioOutputStreams as input to an AudioConverter.
-// For example, this allows converting multiple streams into a common format and
-// using the converted audio as input to another AudioConverter (i.e. a mixer).
-class LoopbackAudioConverter : public AudioConverter::InputCallback {
- public:
- LoopbackAudioConverter(const AudioParameters& input_params,
- const AudioParameters& output_params)
- : audio_converter_(input_params, output_params, false) {}
-
- virtual ~LoopbackAudioConverter() {}
-
- void AddInput(AudioConverter::InputCallback* input) {
- audio_converter_.AddInput(input);
- }
-
- void RemoveInput(AudioConverter::InputCallback* input) {
- audio_converter_.RemoveInput(input);
- }
-
- private:
- virtual double ProvideInput(AudioBus* audio_bus,
- base::TimeDelta buffer_delay) OVERRIDE {
- audio_converter_.Convert(audio_bus);
- return 1.0;
- }
-
- AudioConverter audio_converter_;
-
- DISALLOW_COPY_AND_ASSIGN(LoopbackAudioConverter);
-};
-
-VirtualAudioInputStream* VirtualAudioInputStream::MakeStream(
- AudioManagerBase* manager, const AudioParameters& params,
- base::MessageLoopProxy* message_loop) {
- return new VirtualAudioInputStream(manager, params, message_loop);
-}
-
-VirtualAudioInputStream::VirtualAudioInputStream(
- AudioManagerBase* manager, const AudioParameters& params,
- base::MessageLoopProxy* message_loop)
- : audio_manager_(manager),
- message_loop_(message_loop),
- callback_(NULL),
- buffer_duration_ms_(base::TimeDelta::FromMilliseconds(
- params.frames_per_buffer() * base::Time::kMillisecondsPerSecond /
- static_cast<float>(params.sample_rate()))),
- buffer_(new uint8[params.GetBytesPerBuffer()]),
- params_(params),
- audio_bus_(AudioBus::Create(params_)),
- mixer_(params_, params_, false),
- num_attached_outputs_streams_(0) {
-}
-
-VirtualAudioInputStream::~VirtualAudioInputStream() {
- for (AudioConvertersMap::iterator it = converters_.begin();
- it != converters_.end(); ++it)
- delete it->second;
-
- DCHECK_EQ(0, num_attached_outputs_streams_);
-}
-
-bool VirtualAudioInputStream::Open() {
- memset(buffer_.get(), 0, params_.GetBytesPerBuffer());
- return true;
-}
-
-void VirtualAudioInputStream::Start(AudioInputCallback* callback) {
- DCHECK(message_loop_->BelongsToCurrentThread());
- callback_ = callback;
- on_more_data_cb_.Reset(base::Bind(&VirtualAudioInputStream::ReadAudio,
- base::Unretained(this)));
- audio_manager_->GetMessageLoop()->PostTask(FROM_HERE,
- on_more_data_cb_.callback());
-}
-
-void VirtualAudioInputStream::Stop() {
- DCHECK(message_loop_->BelongsToCurrentThread());
- on_more_data_cb_.Cancel();
-}
-
-void VirtualAudioInputStream::AddOutputStream(
- VirtualAudioOutputStream* stream, const AudioParameters& output_params) {
- DCHECK(message_loop_->BelongsToCurrentThread());
-
- AudioConvertersMap::iterator converter = converters_.find(output_params);
- if (converter == converters_.end()) {
- std::pair<AudioConvertersMap::iterator, bool> result = converters_.insert(
- std::make_pair(output_params,
- new LoopbackAudioConverter(output_params, params_)));
- converter = result.first;
-
-    // Add to the main mixer if we just added a new converter.
- mixer_.AddInput(converter->second);
- }
- converter->second->AddInput(stream);
- ++num_attached_outputs_streams_;
-}
-
-void VirtualAudioInputStream::RemoveOutputStream(
- VirtualAudioOutputStream* stream, const AudioParameters& output_params) {
- DCHECK(message_loop_->BelongsToCurrentThread());
-
- DCHECK(converters_.find(output_params) != converters_.end());
- converters_[output_params]->RemoveInput(stream);
-
- --num_attached_outputs_streams_;
-}
-
-void VirtualAudioInputStream::ReadAudio() {
- DCHECK(message_loop_->BelongsToCurrentThread());
- DCHECK(callback_);
-
- mixer_.Convert(audio_bus_.get());
- audio_bus_->ToInterleaved(params_.frames_per_buffer(),
- params_.bits_per_sample() / 8,
- buffer_.get());
-
- callback_->OnData(this,
- buffer_.get(),
- params_.GetBytesPerBuffer(),
- params_.GetBytesPerBuffer(),
- 1.0);
-
- message_loop_->PostDelayedTask(FROM_HERE,
- on_more_data_cb_.callback(),
- buffer_duration_ms_);
-}
-
-void VirtualAudioInputStream::Close() {
- DCHECK(message_loop_->BelongsToCurrentThread());
- if (callback_) {
- DCHECK(on_more_data_cb_.IsCancelled());
- callback_->OnClose(this);
- callback_ = NULL;
- }
- audio_manager_->ReleaseInputStream(this);
-}
-
-double VirtualAudioInputStream::GetMaxVolume() {
- return 1.0;
-}
-
-void VirtualAudioInputStream::SetVolume(double volume) {}
-
-double VirtualAudioInputStream::GetVolume() {
- return 1.0;
-}
-
-void VirtualAudioInputStream::SetAutomaticGainControl(bool enabled) {}
-
-bool VirtualAudioInputStream::GetAutomaticGainControl() {
- return false;
-}
-
-} // namespace media
diff --git a/src/media/audio/virtual_audio_input_stream.h b/src/media/audio/virtual_audio_input_stream.h
deleted file mode 100644
index fcb87a4..0000000
--- a/src/media/audio/virtual_audio_input_stream.h
+++ /dev/null
@@ -1,101 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef MEDIA_AUDIO_VIRTUAL_AUDIO_INPUT_STREAM_H_
-#define MEDIA_AUDIO_VIRTUAL_AUDIO_INPUT_STREAM_H_
-
-#include <map>
-#include <set>
-
-#include "base/cancelable_callback.h"
-#include "base/gtest_prod_util.h"
-#include "base/memory/scoped_ptr.h"
-#include "media/audio/audio_io.h"
-#include "media/audio/audio_manager_base.h"
-#include "media/audio/audio_parameters.h"
-#include "media/base/audio_converter.h"
-
-namespace media {
-
-class LoopbackAudioConverter;
-class VirtualAudioOutputStream;
-
-// VirtualAudioInputStream converts and mixes audio from attached
-// VirtualAudioOutputStreams into a single stream. It will continuously render
-// audio until this VirtualAudioInputStream is stopped and closed.
-class MEDIA_EXPORT VirtualAudioInputStream : public AudioInputStream {
- public:
- static VirtualAudioInputStream* MakeStream(
- AudioManagerBase* manager,
- const AudioParameters& params,
- base::MessageLoopProxy* message_loop);
-
- virtual ~VirtualAudioInputStream();
-
- // AudioInputStream:
- virtual bool Open() OVERRIDE;
- virtual void Start(AudioInputCallback* callback) OVERRIDE;
- virtual void Stop() OVERRIDE;
- virtual void Close() OVERRIDE;
- virtual double GetMaxVolume() OVERRIDE;
- virtual void SetVolume(double volume) OVERRIDE;
- virtual double GetVolume() OVERRIDE;
- virtual void SetAutomaticGainControl(bool enabled) OVERRIDE;
- virtual bool GetAutomaticGainControl() OVERRIDE;
-
- // Attaches a VirtualAudioOutputStream to be used as input. This
- // VirtualAudioInputStream must outlive all attached streams, so any attached
- // stream must be closed (which causes a detach) before
- // VirtualAudioInputStream is destroyed.
- virtual void AddOutputStream(VirtualAudioOutputStream* stream,
- const AudioParameters& output_params);
-
- // Detaches a VirtualAudioOutputStream and removes it as input.
- virtual void RemoveOutputStream(VirtualAudioOutputStream* stream,
- const AudioParameters& output_params);
-
- protected:
- friend class VirtualAudioInputStreamTest;
- FRIEND_TEST_ALL_PREFIXES(AudioOutputControllerTest,
- VirtualStreamsTriggerDeviceChange);
-
- typedef std::map<AudioParameters, LoopbackAudioConverter*> AudioConvertersMap;
-
- VirtualAudioInputStream(AudioManagerBase* manager,
- const AudioParameters& params,
- base::MessageLoopProxy* message_loop);
-
- // When Start() is called on this class, we continuously schedule this
- // callback to render audio using any attached VirtualAudioOutputStreams until
- // Stop() is called.
- void ReadAudio();
-
- AudioManagerBase* audio_manager_;
- base::MessageLoopProxy* message_loop_;
- AudioInputCallback* callback_;
-
- // Non-const for testing.
- base::TimeDelta buffer_duration_ms_;
- scoped_array<uint8> buffer_;
- AudioParameters params_;
- scoped_ptr<AudioBus> audio_bus_;
- base::CancelableClosure on_more_data_cb_;
-
- // AudioConverters associated with the attached VirtualAudioOutputStreams,
- // partitioned by common AudioParameters.
- AudioConvertersMap converters_;
-
- // AudioConverter that takes all the audio converters and mixes them into one
- // final audio stream.
- AudioConverter mixer_;
-
- // Number of currently attached VirtualAudioOutputStreams.
- int num_attached_outputs_streams_;
-
- DISALLOW_COPY_AND_ASSIGN(VirtualAudioInputStream);
-};
-
-} // namespace media
-
-#endif // MEDIA_AUDIO_VIRTUAL_AUDIO_INPUT_STREAM_H_
diff --git a/src/media/audio/virtual_audio_input_stream_unittest.cc b/src/media/audio/virtual_audio_input_stream_unittest.cc
deleted file mode 100644
index eb65b96..0000000
--- a/src/media/audio/virtual_audio_input_stream_unittest.cc
+++ /dev/null
@@ -1,335 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include <vector>
-
-#include "base/message_loop.h"
-#include "base/synchronization/waitable_event.h"
-#include "media/audio/audio_manager.h"
-#include "media/audio/simple_sources.h"
-#include "media/audio/virtual_audio_input_stream.h"
-#include "testing/gtest/include/gtest/gtest.h"
-
-namespace media {
-
-class MockInputCallback : public AudioInputStream::AudioInputCallback {
- public:
- MockInputCallback() {}
- virtual void OnData(AudioInputStream* stream, const uint8* data,
- uint32 size, uint32 hardware_delay_bytes,
- double volume) {}
- virtual void OnClose(AudioInputStream* stream) {}
- virtual void OnError(AudioInputStream* stream, int code) {}
-
- private:
- DISALLOW_COPY_AND_ASSIGN(MockInputCallback);
-};
-
-class VirtualAudioInputStreamTest : public testing::Test {
- public:
- VirtualAudioInputStreamTest()
- : audio_manager_(AudioManager::Create()),
- params_(
-            AudioParameters::AUDIO_VIRTUAL, CHANNEL_LAYOUT_MONO, 8000, 8, 128),
- output_params_(
- AudioParameters::AUDIO_PCM_LINEAR, CHANNEL_LAYOUT_MONO, 8000, 8,
- 128),
- stream_(NULL),
- source_(CHANNEL_LAYOUT_STEREO, 200.0, 128),
- done_(false, false) {
- }
-
- void StartStreamAndRunTestsOnAudioThread(int num_output_streams,
- int num_callback_iterations,
- int num_streams_removed_per_round,
- int num_expected_source_callbacks) {
- ASSERT_TRUE(audio_manager_->GetMessageLoop()->BelongsToCurrentThread());
- stream_->Open();
- stream_->Start(&input_callback_);
- AddStreamsAndDoCallbacks(num_output_streams,
- num_callback_iterations,
- num_streams_removed_per_round,
- num_expected_source_callbacks);
- }
-
- void AddStreamsAndDoCallbacks(int num_output_streams,
- int num_callback_iterations,
- int num_streams_removed_per_round,
- int num_expected_source_callbacks) {
- ASSERT_TRUE(audio_manager_->GetMessageLoop()->BelongsToCurrentThread());
-
- for (int i = 0; i < num_output_streams; ++i) {
- AudioOutputStream* output_stream =
- audio_manager_->MakeAudioOutputStream(output_params_);
- DCHECK(output_stream);
- output_streams_.push_back(output_stream);
-
- output_stream->Open();
- output_stream->Start(&source_);
- }
-
- if (num_output_streams == 0 && num_streams_removed_per_round > 0) {
- AudioOutputStream* output_stream = output_streams_.back();
- output_streams_.pop_back();
- output_stream->Stop();
- output_stream->Close();
- }
-
- if (num_callback_iterations > 0) {
- // Force the next callback to be immediate.
- stream_->buffer_duration_ms_ = base::TimeDelta();
- audio_manager_->GetMessageLoop()->PostTask(
- FROM_HERE, base::Bind(
- &VirtualAudioInputStreamTest::AddStreamsAndDoCallbacks,
- base::Unretained(this),
- 0,
- --num_callback_iterations,
- num_streams_removed_per_round,
- num_expected_source_callbacks));
- } else {
- // Finish the test.
- EXPECT_EQ(num_expected_source_callbacks, source_.callbacks());
- EXPECT_EQ(0, source_.errors());
-
- for (std::vector<AudioOutputStream*>::iterator it =
- output_streams_.begin(); it != output_streams_.end(); ++it)
- (*it)->Stop();
-
- stream_->Stop();
-
- audio_manager_->GetMessageLoop()->PostTask(
- FROM_HERE, base::Bind(&VirtualAudioInputStreamTest::EndTest,
- base::Unretained(this)));
- }
- }
-
- void OpenAndCloseOnAudioThread() {
- ASSERT_TRUE(audio_manager_->GetMessageLoop()->BelongsToCurrentThread());
- stream_->Open();
-
- // Create 2 output streams, which we just open and close without starting.
- const int num_output_stream = 2;
-
- for (int i = 0; i < num_output_stream; ++i) {
- AudioOutputStream* output_stream =
- audio_manager_->MakeAudioOutputStream(output_params_);
- DCHECK(output_stream);
- output_streams_.push_back(output_stream);
-
- output_stream->Open();
- }
-
- audio_manager_->GetMessageLoop()->PostTask(
- FROM_HERE, base::Bind(&VirtualAudioInputStreamTest::EndTest,
- base::Unretained(this)));
- }
-
- void StartStopOnAudioThread(int num_output_streams,
- int num_callback_iterations,
- int num_expected_source_callbacks) {
- ASSERT_TRUE(audio_manager_->GetMessageLoop()->BelongsToCurrentThread());
- stream_->Open();
- stream_->Start(&input_callback_);
- StartStopCallback(true, num_output_streams, num_callback_iterations,
- num_expected_source_callbacks);
- }
-
- void StartStopCallback(bool init,
- int num_output_streams,
- int num_callback_iterations,
- int num_expected_source_callbacks) {
- ASSERT_TRUE(audio_manager_->GetMessageLoop()->BelongsToCurrentThread());
-
- if (init) {
- for (int i = 0; i < num_output_streams; ++i) {
- AudioOutputStream* output_stream =
- audio_manager_->MakeAudioOutputStream(output_params_);
- DCHECK(output_stream);
- output_streams_.push_back(output_stream);
-
- output_stream->Open();
- output_stream->Start(&source_);
- }
-
- // Start with an odd iteration number so we call Stop() first below.
- DCHECK_NE(0, num_callback_iterations % 2);
- }
-
- // Start or stop half the streams.
- for (int i = 0; i < num_output_streams / 2; ++i) {
- if (num_callback_iterations % 2 != 0)
- output_streams_[i]->Stop();
- else
- output_streams_[i]->Start(&source_);
- }
-
- if (num_callback_iterations > 0) {
- // Force the next callback to be immediate.
- stream_->buffer_duration_ms_ = base::TimeDelta::FromMilliseconds(0);
- audio_manager_->GetMessageLoop()->PostTask(
- FROM_HERE, base::Bind(
- &VirtualAudioInputStreamTest::StartStopCallback,
- base::Unretained(this),
- false,
- num_output_streams,
- --num_callback_iterations,
- num_expected_source_callbacks));
- } else {
- // Finish the test.
- EXPECT_EQ(num_expected_source_callbacks, source_.callbacks());
- EXPECT_EQ(0, source_.errors());
-
- for (std::vector<AudioOutputStream*>::iterator it =
- output_streams_.begin(); it != output_streams_.end(); ++it)
- (*it)->Stop();
-
- stream_->Stop();
-
- audio_manager_->GetMessageLoop()->PostTask(FROM_HERE,
- base::Bind(&VirtualAudioInputStreamTest::EndTest,
- base::Unretained(this)));
- }
- }
-
- void EndTest() {
- for (std::vector<AudioOutputStream*>::iterator it =
- output_streams_.begin(); it != output_streams_.end(); ++it)
- (*it)->Close();
-
- stream_->Close();
-
- done_.Signal();
- }
-
- protected:
- scoped_ptr<AudioManager> audio_manager_;
- AudioParameters params_;
- AudioParameters output_params_;
- VirtualAudioInputStream* stream_;
- MockInputCallback input_callback_;
- std::vector<AudioOutputStream*> output_streams_;
- SineWaveAudioSource source_;
- base::WaitableEvent done_;
-
- private:
- DISALLOW_COPY_AND_ASSIGN(VirtualAudioInputStreamTest);
-};
-
-TEST_F(VirtualAudioInputStreamTest, AttachAndDriveSingleStream) {
- stream_ = static_cast<VirtualAudioInputStream*>(
- audio_manager_->MakeAudioInputStream(params_, "1"));
- DCHECK(stream_);
-
- const int num_output_streams = 1;
- const int num_callback_iterations = 1;
- const int num_streams_removed_per_round = 0;
- const int num_expected_source_callbacks = 1;
-
- audio_manager_->GetMessageLoop()->PostTask(
- FROM_HERE, base::Bind(
- &VirtualAudioInputStreamTest::StartStreamAndRunTestsOnAudioThread,
- base::Unretained(this),
- num_output_streams,
- num_callback_iterations,
- num_streams_removed_per_round,
- num_expected_source_callbacks));
-
- done_.Wait();
-}
-
-TEST_F(VirtualAudioInputStreamTest, AttachAndDriveMultipleStreams) {
- stream_ = static_cast<VirtualAudioInputStream*>(
- audio_manager_->MakeAudioInputStream(params_, "1"));
- DCHECK(stream_);
-
- const int num_output_streams = 5;
- const int num_callback_iterations = 5;
- const int num_streams_removed_per_round = 0;
- const int num_expected_source_callbacks = 25;
-
- audio_manager_->GetMessageLoop()->PostTask(
- FROM_HERE, base::Bind(
- &VirtualAudioInputStreamTest::StartStreamAndRunTestsOnAudioThread,
- base::Unretained(this),
- num_output_streams,
- num_callback_iterations,
- num_streams_removed_per_round,
- num_expected_source_callbacks));
-
- done_.Wait();
-}
-
-TEST_F(VirtualAudioInputStreamTest, AttachAndRemoveStreams) {
- stream_ = static_cast<VirtualAudioInputStream*>(
- audio_manager_->MakeAudioInputStream(params_, "1"));
- DCHECK(stream_);
-
- const int num_output_streams = 8;
- const int num_callback_iterations = 5;
- const int num_streams_removed_per_round = 1;
- const int num_expected_source_callbacks = 8 + 7 + 6 + 5 + 4;
-
- audio_manager_->GetMessageLoop()->PostTask(
- FROM_HERE, base::Bind(
- &VirtualAudioInputStreamTest::StartStreamAndRunTestsOnAudioThread,
- base::Unretained(this),
- num_output_streams,
- num_callback_iterations,
- num_streams_removed_per_round,
- num_expected_source_callbacks));
-
- done_.Wait();
-}
-
-// Opens/closes a VirtualAudioInputStream and a number of attached
-// VirtualAudioOutputStreams without calling Start()/Stop().
-TEST_F(VirtualAudioInputStreamTest, OpenAndClose) {
- stream_ = static_cast<VirtualAudioInputStream*>(
- audio_manager_->MakeAudioInputStream(params_, "1"));
- DCHECK(stream_);
-
- audio_manager_->GetMessageLoop()->PostTask(
- FROM_HERE, base::Bind(
- &VirtualAudioInputStreamTest::OpenAndCloseOnAudioThread,
- base::Unretained(this)));
-
- done_.Wait();
-}
-
-// Creates and closes a VirtualAudioInputStream.
-TEST_F(VirtualAudioInputStreamTest, CreateAndClose) {
- stream_ = static_cast<VirtualAudioInputStream*>(
- audio_manager_->MakeAudioInputStream(params_, "1"));
- DCHECK(stream_);
-
- audio_manager_->GetMessageLoop()->PostTask(
- FROM_HERE, base::Bind(&VirtualAudioInputStreamTest::EndTest,
- base::Unretained(this)));
-
- done_.Wait();
-}
-
-// Starts and stops VirtualAudioOutputStreams while attached to a
-// VirtualAudioInputStream.
-TEST_F(VirtualAudioInputStreamTest, AttachAndStartStopStreams) {
- stream_ = static_cast<VirtualAudioInputStream*>(
- audio_manager_->MakeAudioInputStream(params_, "1"));
- DCHECK(stream_);
-
- const int num_output_streams = 4;
- const int num_callback_iterations = 5;
- const int num_expected_source_callbacks = 2 + 4 + 2 + 4 + 2;
-
- audio_manager_->GetMessageLoop()->PostTask(
- FROM_HERE, base::Bind(
- &VirtualAudioInputStreamTest::StartStopOnAudioThread,
- base::Unretained(this),
- num_output_streams,
- num_callback_iterations,
- num_expected_source_callbacks));
-
- done_.Wait();
-}
-
-} // namespace media
diff --git a/src/media/audio/virtual_audio_output_stream.cc b/src/media/audio/virtual_audio_output_stream.cc
deleted file mode 100644
index aacc667..0000000
--- a/src/media/audio/virtual_audio_output_stream.cc
+++ /dev/null
@@ -1,79 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "media/audio/virtual_audio_output_stream.h"
-
-#include "base/message_loop.h"
-#include "media/audio/audio_manager_base.h"
-#include "media/audio/virtual_audio_input_stream.h"
-
-namespace media {
-
-// static
-VirtualAudioOutputStream* VirtualAudioOutputStream::MakeStream(
- AudioManagerBase* manager, const AudioParameters& params,
- base::MessageLoopProxy* message_loop, VirtualAudioInputStream* target) {
- return new VirtualAudioOutputStream(manager, params, message_loop, target);
-}
-
-VirtualAudioOutputStream::VirtualAudioOutputStream(
- AudioManagerBase* manager, const AudioParameters& params,
- base::MessageLoopProxy* message_loop, VirtualAudioInputStream* target)
- : audio_manager_(manager), message_loop_(message_loop), callback_(NULL),
- params_(params), target_input_stream_(target), volume_(1.0f),
- attached_(false) {
-}
-
-VirtualAudioOutputStream::~VirtualAudioOutputStream() {
- DCHECK(!callback_);
- DCHECK(!attached_);
-}
-
-bool VirtualAudioOutputStream::Open() {
- DCHECK(message_loop_->BelongsToCurrentThread());
- return true;
-}
-
-void VirtualAudioOutputStream::Start(AudioSourceCallback* callback) {
- DCHECK(message_loop_->BelongsToCurrentThread());
- DCHECK(!attached_);
- callback_ = callback;
- target_input_stream_->AddOutputStream(this, params_);
- attached_ = true;
-}
-
-void VirtualAudioOutputStream::Stop() {
- DCHECK(message_loop_->BelongsToCurrentThread());
- DCHECK(attached_);
- callback_ = NULL;
- target_input_stream_->RemoveOutputStream(this, params_);
- attached_ = false;
-}
-
-void VirtualAudioOutputStream::Close() {
- DCHECK(message_loop_->BelongsToCurrentThread());
- audio_manager_->ReleaseOutputStream(this);
-}
-
-void VirtualAudioOutputStream::SetVolume(double volume) {
- volume_ = volume;
-}
-
-void VirtualAudioOutputStream::GetVolume(double* volume) {
- *volume = volume_;
-}
-
-double VirtualAudioOutputStream::ProvideInput(AudioBus* audio_bus,
- base::TimeDelta buffer_delay) {
- DCHECK(message_loop_->BelongsToCurrentThread());
- DCHECK(callback_);
-
- int frames = callback_->OnMoreData(audio_bus, AudioBuffersState());
- if (frames < audio_bus->frames())
- audio_bus->ZeroFramesPartial(frames, audio_bus->frames() - frames);
-
- return frames > 0 ? volume_ : 0;
-}
-
-} // namespace media
diff --git a/src/media/audio/virtual_audio_output_stream.h b/src/media/audio/virtual_audio_output_stream.h
deleted file mode 100644
index 0c2969d..0000000
--- a/src/media/audio/virtual_audio_output_stream.h
+++ /dev/null
@@ -1,71 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef MEDIA_AUDIO_VIRTUAL_AUDIO_OUTPUT_STREAM_H_
-#define MEDIA_AUDIO_VIRTUAL_AUDIO_OUTPUT_STREAM_H_
-
-#include "base/message_loop_proxy.h"
-#include "media/audio/audio_io.h"
-#include "media/audio/audio_parameters.h"
-#include "media/base/audio_converter.h"
-
-namespace media {
-
-class AudioManagerBase;
-class VirtualAudioInputStream;
-
-// VirtualAudioOutputStream attaches to a VirtualAudioInputStream when Start()
-// is called and is used as an audio source. VirtualAudioOutputStream also
-// implements an interface so it can be used as an input to AudioConverter so
-// that we can get audio frames that match the AudioParameters that
-// VirtualAudioInputStream expects.
-class MEDIA_EXPORT VirtualAudioOutputStream
- : public AudioOutputStream,
- public AudioConverter::InputCallback {
- public:
- static VirtualAudioOutputStream* MakeStream(
- AudioManagerBase* manager,
- const AudioParameters& params,
- base::MessageLoopProxy* message_loop,
- VirtualAudioInputStream* target);
-
- virtual ~VirtualAudioOutputStream();
-
- // AudioOutputStream:
- virtual bool Open() OVERRIDE;
- virtual void Start(AudioSourceCallback* callback) OVERRIDE;
- virtual void Stop() OVERRIDE;
- virtual void SetVolume(double volume) OVERRIDE;
- virtual void GetVolume(double* volume) OVERRIDE;
- virtual void Close() OVERRIDE;
-
- protected:
- VirtualAudioOutputStream(AudioManagerBase* manager,
- const AudioParameters& params,
- base::MessageLoopProxy* message_loop,
- VirtualAudioInputStream* target);
-
- private:
- // AudioConverter::InputCallback:
- virtual double ProvideInput(AudioBus* audio_bus,
- base::TimeDelta buffer_delay) OVERRIDE;
-
- AudioManagerBase* audio_manager_;
- base::MessageLoopProxy* message_loop_;
- AudioSourceCallback* callback_;
- AudioParameters params_;
-
- // Pointer to the VirtualAudioInputStream to attach to when Start() is called.
- // This pointer should always be valid because VirtualAudioInputStream should
- // outlive this class.
- VirtualAudioInputStream* target_input_stream_;
- double volume_;
- bool attached_;
-
- DISALLOW_COPY_AND_ASSIGN(VirtualAudioOutputStream);
-};
-
-} // namespace media
-
-#endif // MEDIA_AUDIO_VIRTUAL_AUDIO_OUTPUT_STREAM_H_
diff --git a/src/media/audio/virtual_audio_output_stream_unittest.cc b/src/media/audio/virtual_audio_output_stream_unittest.cc
deleted file mode 100644
index ae267f5..0000000
--- a/src/media/audio/virtual_audio_output_stream_unittest.cc
+++ /dev/null
@@ -1,129 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/message_loop.h"
-#include "base/synchronization/waitable_event.h"
-#include "media/audio/audio_manager.h"
-#include "media/audio/simple_sources.h"
-#include "media/audio/virtual_audio_input_stream.h"
-#include "media/audio/virtual_audio_output_stream.h"
-#include "testing/gmock/include/gmock/gmock.h"
-#include "testing/gtest/include/gtest/gtest.h"
-
-using ::testing::_;
-
-namespace media {
-
-class MockVirtualAudioInputStream : public VirtualAudioInputStream {
- public:
- MockVirtualAudioInputStream(AudioManagerBase* manager,
- AudioParameters params,
- base::MessageLoopProxy* message_loop)
- : VirtualAudioInputStream(manager, params, message_loop) {}
- ~MockVirtualAudioInputStream() {}
-
- MOCK_METHOD2(AddOutputStream, void(VirtualAudioOutputStream* stream,
- const AudioParameters& output_params));
- MOCK_METHOD2(RemoveOutputStream, void(VirtualAudioOutputStream* stream,
- const AudioParameters& output_params));
-
- private:
- DISALLOW_COPY_AND_ASSIGN(MockVirtualAudioInputStream);
-};
-
-class MockAudioDeviceListener : public AudioManager::AudioDeviceListener {
- public:
- MOCK_METHOD0(OnDeviceChange, void());
-};
-
-class VirtualAudioOutputStreamTest : public testing::Test {
- public:
- void ListenAndCreateVirtualOnAudioThread(
- AudioManager* manager, AudioManager::AudioDeviceListener* listener) {
- manager->AddOutputDeviceChangeListener(listener);
-
- AudioParameters params(
- AudioParameters::AUDIO_VIRTUAL, CHANNEL_LAYOUT_MONO, 8000, 8, 128);
- AudioInputStream* stream = manager->MakeAudioInputStream(params, "1");
- stream->Close();
- signal_.Signal();
- }
-
- void RemoveListenerOnAudioThread(
- AudioManager* manager, AudioManager::AudioDeviceListener* listener) {
- manager->RemoveOutputDeviceChangeListener(listener);
- signal_.Signal();
- }
-
- protected:
- VirtualAudioOutputStreamTest() : signal_(false, false) {}
-
- base::WaitableEvent signal_;
-
- private:
- DISALLOW_COPY_AND_ASSIGN(VirtualAudioOutputStreamTest);
-};
-
-TEST_F(VirtualAudioOutputStreamTest, StartStopStartStop) {
- scoped_ptr<AudioManager> audio_manager(AudioManager::Create());
-
- MessageLoop message_loop;
-
- AudioParameters params(
- AudioParameters::AUDIO_VIRTUAL, CHANNEL_LAYOUT_MONO, 8000, 8, 128);
- AudioParameters output_params(
- AudioParameters::AUDIO_PCM_LINEAR, CHANNEL_LAYOUT_MONO, 8000, 8, 128);
-
- MockVirtualAudioInputStream input_stream(
- static_cast<AudioManagerBase*>(audio_manager.get()),
- params,
- message_loop.message_loop_proxy());
-
- EXPECT_CALL(input_stream, AddOutputStream(_, _)).Times(2);
- EXPECT_CALL(input_stream, RemoveOutputStream(_, _)).Times(2);
-
- scoped_ptr<VirtualAudioOutputStream> output_stream(
- VirtualAudioOutputStream::MakeStream(
- static_cast<AudioManagerBase*>(audio_manager.get()),
- output_params,
- message_loop.message_loop_proxy(),
- &input_stream));
-
- SineWaveAudioSource source(CHANNEL_LAYOUT_STEREO, 200.0, 128);
- output_stream->Start(&source);
- output_stream->Stop();
- output_stream->Start(&source);
- output_stream->Stop();
-  // Can't Close() here because this output stream is not owned by the audio
-  // manager.
-}
-
-// Tests that we get notifications to reattach output streams when we create a
-// VirtualAudioInputStream.
-TEST_F(VirtualAudioOutputStreamTest, OutputStreamsNotified) {
- scoped_ptr<AudioManager> audio_manager(AudioManager::Create());
-
- MockAudioDeviceListener mock_listener;
- EXPECT_CALL(mock_listener, OnDeviceChange()).Times(2);
-
- audio_manager->GetMessageLoop()->PostTask(
- FROM_HERE, base::Bind(
- &VirtualAudioOutputStreamTest::ListenAndCreateVirtualOnAudioThread,
- base::Unretained(this),
- audio_manager.get(),
- &mock_listener));
-
- signal_.Wait();
-
- audio_manager->GetMessageLoop()->PostTask(
- FROM_HERE, base::Bind(
- &VirtualAudioOutputStreamTest::RemoveListenerOnAudioThread,
- base::Unretained(this),
- audio_manager.get(),
- &mock_listener));
-
- signal_.Wait();
-}
-
-} // namespace media
diff --git a/src/media/audio/win/audio_device_listener_win.cc b/src/media/audio/win/audio_device_listener_win.cc
deleted file mode 100644
index 6664498..0000000
--- a/src/media/audio/win/audio_device_listener_win.cc
+++ /dev/null
@@ -1,139 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "media/audio/win/audio_device_listener_win.h"
-
-#include <Audioclient.h>
-
-#include "base/logging.h"
-#include "base/system_monitor/system_monitor.h"
-#include "base/utf_string_conversions.h"
-#include "base/win/scoped_co_mem.h"
-#include "base/win/windows_version.h"
-#include "media/audio/audio_util.h"
-#include "media/audio/win/core_audio_util_win.h"
-
-using base::win::ScopedCoMem;
-
-namespace media {
-
-AudioDeviceListenerWin::AudioDeviceListenerWin(const base::Closure& listener_cb)
- : listener_cb_(listener_cb) {
- CHECK(CoreAudioUtil::IsSupported());
-
- ScopedComPtr<IMMDeviceEnumerator> device_enumerator(
- CoreAudioUtil::CreateDeviceEnumerator());
- if (!device_enumerator)
- return;
-
- HRESULT hr = device_enumerator->RegisterEndpointNotificationCallback(this);
- if (FAILED(hr)) {
- DLOG(ERROR) << "RegisterEndpointNotificationCallback failed: "
- << std::hex << hr;
- return;
- }
-
- device_enumerator_ = device_enumerator;
-
- ScopedComPtr<IMMDevice> device =
- CoreAudioUtil::CreateDefaultDevice(eRender, eConsole);
- if (!device) {
- // Most probable reason for ending up here is that all audio devices are
- // disabled or unplugged.
- DVLOG(1) << "CoreAudioUtil::CreateDefaultDevice failed. No device?";
- return;
- }
-
- AudioDeviceName device_name;
- hr = CoreAudioUtil::GetDeviceName(device, &device_name);
- if (FAILED(hr)) {
- DVLOG(1) << "Failed to retrieve the device id: " << std::hex << hr;
- return;
- }
- default_render_device_id_ = device_name.unique_id;
-}
-
-AudioDeviceListenerWin::~AudioDeviceListenerWin() {
- DCHECK(thread_checker_.CalledOnValidThread());
- if (device_enumerator_) {
- HRESULT hr =
- device_enumerator_->UnregisterEndpointNotificationCallback(this);
- DLOG_IF(ERROR, FAILED(hr)) << "UnregisterEndpointNotificationCallback() "
- << "failed: " << std::hex << hr;
- }
-}
-
-STDMETHODIMP_(ULONG) AudioDeviceListenerWin::AddRef() {
- return 1;
-}
-
-STDMETHODIMP_(ULONG) AudioDeviceListenerWin::Release() {
- return 1;
-}
-
-STDMETHODIMP AudioDeviceListenerWin::QueryInterface(REFIID iid, void** object) {
- if (iid == IID_IUnknown || iid == __uuidof(IMMNotificationClient)) {
- *object = static_cast<IMMNotificationClient*>(this);
- return S_OK;
- }
-
- *object = NULL;
- return E_NOINTERFACE;
-}
-
-STDMETHODIMP AudioDeviceListenerWin::OnPropertyValueChanged(
- LPCWSTR device_id, const PROPERTYKEY key) {
- // TODO(dalecurtis): We need to handle changes for the current default device
- // here. It's tricky because this method may be called many (20+) times for
- // a single change like sample rate. http://crbug.com/153056
- return S_OK;
-}
-
-STDMETHODIMP AudioDeviceListenerWin::OnDeviceAdded(LPCWSTR device_id) {
- // We don't care when devices are added.
- return S_OK;
-}
-
-STDMETHODIMP AudioDeviceListenerWin::OnDeviceRemoved(LPCWSTR device_id) {
- // We don't care when devices are removed.
- return S_OK;
-}
-
-STDMETHODIMP AudioDeviceListenerWin::OnDeviceStateChanged(LPCWSTR device_id,
- DWORD new_state) {
- if (new_state != DEVICE_STATE_ACTIVE && new_state != DEVICE_STATE_NOTPRESENT)
- return S_OK;
-
- base::SystemMonitor* monitor = base::SystemMonitor::Get();
- if (monitor)
- monitor->ProcessDevicesChanged(base::SystemMonitor::DEVTYPE_AUDIO_CAPTURE);
-
- return S_OK;
-}
-
-STDMETHODIMP AudioDeviceListenerWin::OnDefaultDeviceChanged(
- EDataFlow flow, ERole role, LPCWSTR new_default_device_id) {
- // Only listen for output device changes right now...
-  if (flow != eRender && role != eConsole)
- return S_OK;
-
- // If no device is now available, |new_default_device_id| will be NULL.
- std::string new_device_id = "";
- if (new_default_device_id)
- new_device_id = WideToUTF8(new_default_device_id);
-
- // Only fire a state change event if the device has actually changed.
- // TODO(dalecurtis): This still seems to fire an extra event on my machine for
- // an unplug event (probably others too); e.g., we get two transitions to a
- // new default device id.
- if (new_device_id.compare(default_render_device_id_) == 0)
- return S_OK;
-
- default_render_device_id_ = new_device_id;
- listener_cb_.Run();
-
- return S_OK;
-}
-
-} // namespace media
diff --git a/src/media/audio/win/audio_device_listener_win.h b/src/media/audio/win/audio_device_listener_win.h
deleted file mode 100644
index 6a31251..0000000
--- a/src/media/audio/win/audio_device_listener_win.h
+++ /dev/null
@@ -1,61 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef MEDIA_AUDIO_WIN_AUDIO_DEVICE_LISTENER_WIN_H_
-#define MEDIA_AUDIO_WIN_AUDIO_DEVICE_LISTENER_WIN_H_
-
-#include <MMDeviceAPI.h>
-#include <string>
-
-#include "base/basictypes.h"
-#include "base/callback.h"
-#include "base/threading/thread_checker.h"
-#include "base/win/scoped_comptr.h"
-#include "media/base/media_export.h"
-
-using base::win::ScopedComPtr;
-
-namespace media {
-
-// IMMNotificationClient implementation for listening for default device changes
-// and forwarding to AudioManagerWin so it can notify downstream clients. Only
-// output (eRender) device changes are supported currently. Core Audio support
-// is required to construct this object. Must be constructed and destructed on
-// a single COM initialized thread.
-// TODO(dalecurtis, henrika): Support input device changes.
-class MEDIA_EXPORT AudioDeviceListenerWin : public IMMNotificationClient {
- public:
-  // The listener callback will be called from a system-level multimedia thread,
-  // thus the callee must be thread safe. |listener_cb| is a permanent callback
- // and must outlive AudioDeviceListenerWin.
- explicit AudioDeviceListenerWin(const base::Closure& listener_cb);
- virtual ~AudioDeviceListenerWin();
-
- private:
- friend class AudioDeviceListenerWinTest;
-
- // IMMNotificationClient implementation.
- STDMETHOD_(ULONG, AddRef)();
- STDMETHOD_(ULONG, Release)();
- STDMETHOD(QueryInterface)(REFIID iid, void** object);
- STDMETHOD(OnPropertyValueChanged)(LPCWSTR device_id, const PROPERTYKEY key);
- STDMETHOD(OnDeviceAdded)(LPCWSTR device_id);
- STDMETHOD(OnDeviceRemoved)(LPCWSTR device_id);
- STDMETHOD(OnDeviceStateChanged)(LPCWSTR device_id, DWORD new_state);
- STDMETHOD(OnDefaultDeviceChanged)(EDataFlow flow, ERole role,
- LPCWSTR new_default_device_id);
-
- base::Closure listener_cb_;
- ScopedComPtr<IMMDeviceEnumerator> device_enumerator_;
- std::string default_render_device_id_;
-
- // AudioDeviceListenerWin must be constructed and destructed on one thread.
- base::ThreadChecker thread_checker_;
-
- DISALLOW_COPY_AND_ASSIGN(AudioDeviceListenerWin);
-};
-
-} // namespace media
-
-#endif // MEDIA_AUDIO_WIN_AUDIO_DEVICE_LISTENER_WIN_H_
diff --git a/src/media/audio/win/audio_device_listener_win_unittest.cc b/src/media/audio/win/audio_device_listener_win_unittest.cc
deleted file mode 100644
index 989f8a6..0000000
--- a/src/media/audio/win/audio_device_listener_win_unittest.cc
+++ /dev/null
@@ -1,103 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include <string>
-
-#include "base/bind.h"
-#include "base/bind_helpers.h"
-#include "base/memory/scoped_ptr.h"
-#include "base/win/scoped_com_initializer.h"
-#include "base/utf_string_conversions.h"
-#include "media/audio/audio_manager.h"
-#include "media/audio/win/audio_device_listener_win.h"
-#include "media/audio/win/core_audio_util_win.h"
-#include "testing/gmock/include/gmock/gmock.h"
-#include "testing/gtest/include/gtest/gtest.h"
-
-using base::win::ScopedCOMInitializer;
-
-namespace media {
-
-static const char* kNoDevice = "";
-static const char* kFirstTestDevice = "test_device_0";
-static const char* kSecondTestDevice = "test_device_1";
-
-class AudioDeviceListenerWinTest : public testing::Test {
- public:
- AudioDeviceListenerWinTest()
- : com_init_(ScopedCOMInitializer::kMTA) {
- }
-
- virtual void SetUp() {
- if (!CoreAudioUtil::IsSupported())
- return;
-
- output_device_listener_.reset(new AudioDeviceListenerWin(base::Bind(
- &AudioDeviceListenerWinTest::OnDeviceChange, base::Unretained(this))));
- }
-
- // Simulate a device change where no output devices are available.
- bool SimulateNullDefaultOutputDeviceChange() {
- return output_device_listener_->OnDefaultDeviceChanged(
- static_cast<EDataFlow>(eConsole), static_cast<ERole>(eRender),
- NULL) == S_OK;
- }
-
- bool SimulateDefaultOutputDeviceChange(const char* new_device_id) {
- return output_device_listener_->OnDefaultDeviceChanged(
- static_cast<EDataFlow>(eConsole), static_cast<ERole>(eRender),
- ASCIIToWide(new_device_id).c_str()) == S_OK;
- }
-
- void SetOutputDeviceId(std::string new_device_id) {
- output_device_listener_->default_render_device_id_ = new_device_id;
- }
-
- MOCK_METHOD0(OnDeviceChange, void());
-
- private:
- ScopedCOMInitializer com_init_;
- scoped_ptr<AudioDeviceListenerWin> output_device_listener_;
-
- DISALLOW_COPY_AND_ASSIGN(AudioDeviceListenerWinTest);
-};
-
-// Simulate device change events and ensure we get the right callbacks.
-TEST_F(AudioDeviceListenerWinTest, OutputDeviceChange) {
- if (!CoreAudioUtil::IsSupported())
- return;
-
- SetOutputDeviceId(kNoDevice);
- EXPECT_CALL(*this, OnDeviceChange()).Times(1);
- ASSERT_TRUE(SimulateDefaultOutputDeviceChange(kFirstTestDevice));
-
- testing::Mock::VerifyAndClear(this);
- EXPECT_CALL(*this, OnDeviceChange()).Times(1);
- ASSERT_TRUE(SimulateDefaultOutputDeviceChange(kSecondTestDevice));
-
- // The second device event should be ignored since the device id has not
- // changed.
- ASSERT_TRUE(SimulateDefaultOutputDeviceChange(kSecondTestDevice));
-}
-
-// Ensure that null output device changes don't crash. Simulates the situation
-// where we have no output devices.
-TEST_F(AudioDeviceListenerWinTest, NullOutputDeviceChange) {
- if (!CoreAudioUtil::IsSupported())
- return;
-
- SetOutputDeviceId(kNoDevice);
- EXPECT_CALL(*this, OnDeviceChange()).Times(0);
- ASSERT_TRUE(SimulateNullDefaultOutputDeviceChange());
-
- testing::Mock::VerifyAndClear(this);
- EXPECT_CALL(*this, OnDeviceChange()).Times(1);
- ASSERT_TRUE(SimulateDefaultOutputDeviceChange(kFirstTestDevice));
-
- testing::Mock::VerifyAndClear(this);
- EXPECT_CALL(*this, OnDeviceChange()).Times(1);
- ASSERT_TRUE(SimulateNullDefaultOutputDeviceChange());
-}
-
-} // namespace media
diff --git a/src/media/audio/win/audio_low_latency_input_win.cc b/src/media/audio/win/audio_low_latency_input_win.cc
deleted file mode 100644
index 1e5464f..0000000
--- a/src/media/audio/win/audio_low_latency_input_win.cc
+++ /dev/null
@@ -1,635 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "media/audio/win/audio_low_latency_input_win.h"
-
-#include "base/logging.h"
-#include "base/memory/scoped_ptr.h"
-#include "base/utf_string_conversions.h"
-#include "media/audio/audio_util.h"
-#include "media/audio/win/audio_manager_win.h"
-#include "media/audio/win/avrt_wrapper_win.h"
-
-using base::win::ScopedComPtr;
-using base::win::ScopedCOMInitializer;
-
-namespace media {
-
-WASAPIAudioInputStream::WASAPIAudioInputStream(
- AudioManagerWin* manager, const AudioParameters& params,
- const std::string& device_id)
- : manager_(manager),
- capture_thread_(NULL),
- opened_(false),
- started_(false),
- endpoint_buffer_size_frames_(0),
- device_id_(device_id),
- sink_(NULL) {
- DCHECK(manager_);
-
- // Load the Avrt DLL if not already loaded. Required to support MMCSS.
- bool avrt_init = avrt::Initialize();
- DCHECK(avrt_init) << "Failed to load the Avrt.dll";
-
- // Set up the desired capture format specified by the client.
- format_.nSamplesPerSec = params.sample_rate();
- format_.wFormatTag = WAVE_FORMAT_PCM;
- format_.wBitsPerSample = params.bits_per_sample();
- format_.nChannels = params.channels();
- format_.nBlockAlign = (format_.wBitsPerSample / 8) * format_.nChannels;
- format_.nAvgBytesPerSec = format_.nSamplesPerSec * format_.nBlockAlign;
- format_.cbSize = 0;
-
- // Size in bytes of each audio frame.
- frame_size_ = format_.nBlockAlign;
- // Store size of audio packets which we expect to get from the audio
- // endpoint device in each capture event.
- packet_size_frames_ = params.GetBytesPerBuffer() / format_.nBlockAlign;
- packet_size_bytes_ = params.GetBytesPerBuffer();
- DVLOG(1) << "Number of bytes per audio frame : " << frame_size_;
- DVLOG(1) << "Number of audio frames per packet: " << packet_size_frames_;
-
- // All events are auto-reset events and non-signaled initially.
-
- // Create the event which the audio engine will signal each time
- // a buffer becomes ready to be processed by the client.
- audio_samples_ready_event_.Set(CreateEvent(NULL, FALSE, FALSE, NULL));
- DCHECK(audio_samples_ready_event_.IsValid());
-
- // Create the event which will be set in Stop() when capturing shall stop.
- stop_capture_event_.Set(CreateEvent(NULL, FALSE, FALSE, NULL));
- DCHECK(stop_capture_event_.IsValid());
-
- ms_to_frame_count_ = static_cast<double>(params.sample_rate()) / 1000.0;
-
- LARGE_INTEGER performance_frequency;
- if (QueryPerformanceFrequency(&performance_frequency)) {
- perf_count_to_100ns_units_ =
- (10000000.0 / static_cast<double>(performance_frequency.QuadPart));
- } else {
- LOG(ERROR) << "High-resolution performance counters are not supported.";
- perf_count_to_100ns_units_ = 0.0;
- }
-}
-
-WASAPIAudioInputStream::~WASAPIAudioInputStream() {}
-
-bool WASAPIAudioInputStream::Open() {
- DCHECK(CalledOnValidThread());
- // Verify that we are not already opened.
- if (opened_)
- return false;
-
- // Obtain a reference to the IMMDevice interface of the capturing
- // device with the specified unique identifier or role which was
- // set at construction.
- HRESULT hr = SetCaptureDevice();
- if (FAILED(hr))
- return false;
-
- // Obtain an IAudioClient interface which enables us to create and initialize
- // an audio stream between an audio application and the audio engine.
- hr = ActivateCaptureDevice();
- if (FAILED(hr))
- return false;
-
- // Retrieve the stream format which the audio engine uses for its internal
- // processing/mixing of shared-mode streams. This function call is for
- // diagnostic purposes only and only in debug mode.
-#ifndef NDEBUG
- hr = GetAudioEngineStreamFormat();
-#endif
-
- // Verify that the selected audio endpoint supports the specified format
- // set during construction.
- if (!DesiredFormatIsSupported()) {
- return false;
- }
-
- // Initialize the audio stream between the client and the device using
- // shared mode and a lowest possible glitch-free latency.
- hr = InitializeAudioEngine();
-
- opened_ = SUCCEEDED(hr);
- return opened_;
-}
-
-void WASAPIAudioInputStream::Start(AudioInputCallback* callback) {
- DCHECK(CalledOnValidThread());
- DCHECK(callback);
- DLOG_IF(ERROR, !opened_) << "Open() has not been called successfully";
- if (!opened_)
- return;
-
- if (started_)
- return;
-
- sink_ = callback;
-
- // Create and start the thread that will drive the capturing by waiting for
- // capture events.
- capture_thread_ =
- new base::DelegateSimpleThread(this, "wasapi_capture_thread");
- capture_thread_->Start();
-
- // Start streaming data between the endpoint buffer and the audio engine.
- HRESULT hr = audio_client_->Start();
- DLOG_IF(ERROR, FAILED(hr)) << "Failed to start input streaming.";
-
- started_ = SUCCEEDED(hr);
-}
-
-void WASAPIAudioInputStream::Stop() {
- DCHECK(CalledOnValidThread());
- if (!started_)
- return;
-
- // Shut down the capture thread.
- if (stop_capture_event_.IsValid()) {
- SetEvent(stop_capture_event_.Get());
- }
-
- // Stop the input audio streaming.
- HRESULT hr = audio_client_->Stop();
- if (FAILED(hr)) {
- LOG(ERROR) << "Failed to stop input streaming.";
- }
-
- // Wait until the thread completes and perform cleanup.
- if (capture_thread_) {
- SetEvent(stop_capture_event_.Get());
- capture_thread_->Join();
- capture_thread_ = NULL;
- }
-
- started_ = false;
-}
-
-void WASAPIAudioInputStream::Close() {
-  // It is valid to call Close() before calling Open() or Start().
- // It is also valid to call Close() after Start() has been called.
- Stop();
- if (sink_) {
- sink_->OnClose(this);
- sink_ = NULL;
- }
-
- // Inform the audio manager that we have been closed. This will cause our
- // destruction.
- manager_->ReleaseInputStream(this);
-}
-
-double WASAPIAudioInputStream::GetMaxVolume() {
-  // Verify that Open() has been called successfully, to ensure that an audio
- // session exists and that an ISimpleAudioVolume interface has been created.
- DLOG_IF(ERROR, !opened_) << "Open() has not been called successfully";
- if (!opened_)
- return 0.0;
-
- // The effective volume value is always in the range 0.0 to 1.0, hence
- // we can return a fixed value (=1.0) here.
- return 1.0;
-}
-
-void WASAPIAudioInputStream::SetVolume(double volume) {
- DVLOG(1) << "SetVolume(volume=" << volume << ")";
- DCHECK(CalledOnValidThread());
- DCHECK_GE(volume, 0.0);
- DCHECK_LE(volume, 1.0);
-
- DLOG_IF(ERROR, !opened_) << "Open() has not been called successfully";
- if (!opened_)
- return;
-
- // Set a new master volume level. Valid volume levels are in the range
- // 0.0 to 1.0. Ignore volume-change events.
- HRESULT hr = simple_audio_volume_->SetMasterVolume(static_cast<float>(volume),
- NULL);
- DLOG_IF(WARNING, FAILED(hr)) << "Failed to set new input master volume.";
-
- // Update the AGC volume level based on the last setting above. Note that,
- // the volume-level resolution is not infinite and it is therefore not
- // possible to assume that the volume provided as input parameter can be
- // used directly. Instead, a new query to the audio hardware is required.
- // This method does nothing if AGC is disabled.
- UpdateAgcVolume();
-}
-
-double WASAPIAudioInputStream::GetVolume() {
- DLOG_IF(ERROR, !opened_) << "Open() has not been called successfully";
- if (!opened_)
- return 0.0;
-
- // Retrieve the current volume level. The value is in the range 0.0 to 1.0.
- float level = 0.0f;
- HRESULT hr = simple_audio_volume_->GetMasterVolume(&level);
- DLOG_IF(WARNING, FAILED(hr)) << "Failed to get input master volume.";
-
- return static_cast<double>(level);
-}
-
-// static
-int WASAPIAudioInputStream::HardwareSampleRate(
- const std::string& device_id) {
- base::win::ScopedCoMem<WAVEFORMATEX> audio_engine_mix_format;
- HRESULT hr = GetMixFormat(device_id, &audio_engine_mix_format);
- if (FAILED(hr))
- return 0;
-
- return static_cast<int>(audio_engine_mix_format->nSamplesPerSec);
-}
-
-// static
-uint32 WASAPIAudioInputStream::HardwareChannelCount(
- const std::string& device_id) {
- base::win::ScopedCoMem<WAVEFORMATEX> audio_engine_mix_format;
- HRESULT hr = GetMixFormat(device_id, &audio_engine_mix_format);
- if (FAILED(hr))
- return 0;
-
- return static_cast<uint32>(audio_engine_mix_format->nChannels);
-}
-
-// static
-HRESULT WASAPIAudioInputStream::GetMixFormat(const std::string& device_id,
- WAVEFORMATEX** device_format) {
- // It is assumed that this static method is called from a COM thread, i.e.,
- // CoInitializeEx() is not called here to avoid STA/MTA conflicts.
- ScopedComPtr<IMMDeviceEnumerator> enumerator;
- HRESULT hr = CoCreateInstance(__uuidof(MMDeviceEnumerator),
- NULL,
- CLSCTX_INPROC_SERVER,
- __uuidof(IMMDeviceEnumerator),
- enumerator.ReceiveVoid());
- if (FAILED(hr))
- return hr;
-
- ScopedComPtr<IMMDevice> endpoint_device;
- if (device_id == AudioManagerBase::kDefaultDeviceId) {
- // Retrieve the default capture audio endpoint.
- hr = enumerator->GetDefaultAudioEndpoint(eCapture, eConsole,
- endpoint_device.Receive());
- } else {
- // Retrieve a capture endpoint device that is specified by an endpoint
- // device-identification string.
- hr = enumerator->GetDevice(UTF8ToUTF16(device_id).c_str(),
- endpoint_device.Receive());
- }
- if (FAILED(hr))
- return hr;
-
- ScopedComPtr<IAudioClient> audio_client;
- hr = endpoint_device->Activate(__uuidof(IAudioClient),
- CLSCTX_INPROC_SERVER,
- NULL,
- audio_client.ReceiveVoid());
- return SUCCEEDED(hr) ? audio_client->GetMixFormat(device_format) : hr;
-}
-
-void WASAPIAudioInputStream::Run() {
- ScopedCOMInitializer com_init(ScopedCOMInitializer::kMTA);
-
- // Increase the thread priority.
- capture_thread_->SetThreadPriority(base::kThreadPriority_RealtimeAudio);
-
- // Enable MMCSS to ensure that this thread receives prioritized access to
- // CPU resources.
- DWORD task_index = 0;
- HANDLE mm_task = avrt::AvSetMmThreadCharacteristics(L"Pro Audio",
- &task_index);
- bool mmcss_is_ok =
- (mm_task && avrt::AvSetMmThreadPriority(mm_task, AVRT_PRIORITY_CRITICAL));
- if (!mmcss_is_ok) {
- // Failed to enable MMCSS on this thread. It is not fatal but can lead
- // to reduced QoS at high load.
- DWORD err = GetLastError();
- LOG(WARNING) << "Failed to enable MMCSS (error code=" << err << ").";
- }
-
- // Allocate a buffer with a size that enables us to take care of cases like:
-  // 1) The recorded buffer size is smaller than, or does not exactly match,
-  //    the selected packet size used in each callback.
- // 2) The selected buffer size is larger than the recorded buffer size in
- // each event.
- size_t buffer_frame_index = 0;
- size_t capture_buffer_size = std::max(
- 2 * endpoint_buffer_size_frames_ * frame_size_,
- 2 * packet_size_frames_ * frame_size_);
- scoped_array<uint8> capture_buffer(new uint8[capture_buffer_size]);
-
- LARGE_INTEGER now_count;
- bool recording = true;
- bool error = false;
- double volume = GetVolume();
- HANDLE wait_array[2] = {stop_capture_event_, audio_samples_ready_event_};
-
- while (recording && !error) {
- HRESULT hr = S_FALSE;
-
- // Wait for a close-down event or a new capture event.
- DWORD wait_result = WaitForMultipleObjects(2, wait_array, FALSE, INFINITE);
- switch (wait_result) {
- case WAIT_FAILED:
- error = true;
- break;
- case WAIT_OBJECT_0 + 0:
- // |stop_capture_event_| has been set.
- recording = false;
- break;
- case WAIT_OBJECT_0 + 1:
- {
- // |audio_samples_ready_event_| has been set.
- BYTE* data_ptr = NULL;
- UINT32 num_frames_to_read = 0;
- DWORD flags = 0;
- UINT64 device_position = 0;
- UINT64 first_audio_frame_timestamp = 0;
-
- // Retrieve the amount of data in the capture endpoint buffer,
- // replace it with silence if required, create callbacks for each
- // packet and store non-delivered data for the next event.
- hr = audio_capture_client_->GetBuffer(&data_ptr,
- &num_frames_to_read,
- &flags,
- &device_position,
- &first_audio_frame_timestamp);
- if (FAILED(hr)) {
- DLOG(ERROR) << "Failed to get data from the capture buffer";
- continue;
- }
-
- if (num_frames_to_read != 0) {
- size_t pos = buffer_frame_index * frame_size_;
- size_t num_bytes = num_frames_to_read * frame_size_;
- DCHECK_GE(capture_buffer_size, pos + num_bytes);
-
- if (flags & AUDCLNT_BUFFERFLAGS_SILENT) {
- // Clear out the local buffer since silence is reported.
- memset(&capture_buffer[pos], 0, num_bytes);
- } else {
- // Copy captured data from audio engine buffer to local buffer.
- memcpy(&capture_buffer[pos], data_ptr, num_bytes);
- }
-
- buffer_frame_index += num_frames_to_read;
- }
-
- hr = audio_capture_client_->ReleaseBuffer(num_frames_to_read);
- DLOG_IF(ERROR, FAILED(hr)) << "Failed to release capture buffer";
-
- // Derive a delay estimate for the captured audio packet.
- // The value contains two parts (A+B), where A is the delay of the
- // first audio frame in the packet and B is the extra delay
- // contained in any stored data. Unit is in audio frames.
- QueryPerformanceCounter(&now_count);
- double audio_delay_frames =
- ((perf_count_to_100ns_units_ * now_count.QuadPart -
- first_audio_frame_timestamp) / 10000.0) * ms_to_frame_count_ +
- buffer_frame_index - num_frames_to_read;
-
- // Update the AGC volume level once every second. Note that,
- // |volume| is also updated each time SetVolume() is called
- // through IPC by the render-side AGC.
- QueryAgcVolume(&volume);
-
- // Deliver captured data to the registered consumer using a packet
- // size which was specified at construction.
- uint32 delay_frames = static_cast<uint32>(audio_delay_frames + 0.5);
- while (buffer_frame_index >= packet_size_frames_) {
- uint8* audio_data =
- reinterpret_cast<uint8*>(capture_buffer.get());
-
- // Deliver data packet, delay estimation and volume level to
- // the user.
- sink_->OnData(this,
- audio_data,
- packet_size_bytes_,
- delay_frames * frame_size_,
- volume);
-
- // Store parts of the recorded data which can't be delivered
- // using the current packet size. The stored section will be used
- // either in the next while-loop iteration or in the next
- // capture event.
- memmove(&capture_buffer[0],
- &capture_buffer[packet_size_bytes_],
- (buffer_frame_index - packet_size_frames_) * frame_size_);
-
- buffer_frame_index -= packet_size_frames_;
- delay_frames -= packet_size_frames_;
- }
- }
- break;
- default:
- error = true;
- break;
- }
- }
-
- if (recording && error) {
- // TODO(henrika): perhaps it is worth improving the cleanup here by e.g.
- // stopping the audio client, joining the thread etc.?
- NOTREACHED() << "WASAPI capturing failed with error code "
- << GetLastError();
- }
-
- // Disable MMCSS.
- if (mm_task && !avrt::AvRevertMmThreadCharacteristics(mm_task)) {
- PLOG(WARNING) << "Failed to disable MMCSS";
- }
-}
-
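The delay estimate in the capture loop above reduces to the age of the packet's first audio frame (the QPC counter converted to 100-ns units, minus the timestamp reported by GetBuffer(), expressed in milliseconds and then in frames) plus the frames that were already stored locally before this packet arrived. A minimal standalone sketch of that arithmetic follows; it is illustrative only, the names are not from the original file, and it assumes the Windows headers this file already includes.

// Illustrative sketch, not part of the original file.
// Returns the capture delay in audio frames for a packet whose first frame
// carries the given 100-nanosecond QPC timestamp.
static double EstimateCaptureDelayFrames(UINT64 first_frame_timestamp_100ns,
                                         int sample_rate,
                                         size_t frames_buffered_before_packet) {
  LARGE_INTEGER now;
  LARGE_INTEGER freq;
  QueryPerformanceCounter(&now);
  QueryPerformanceFrequency(&freq);
  // Performance-counter ticks -> 100-ns units (1e7 such units per second).
  const double perf_count_to_100ns_units = 10000000.0 / freq.QuadPart;
  // Age of the packet's first frame in milliseconds.
  const double age_ms =
      (perf_count_to_100ns_units * now.QuadPart -
       first_frame_timestamp_100ns) / 10000.0;
  // Convert to frames and add what was already waiting in the local buffer.
  const double ms_to_frame_count = sample_rate / 1000.0;
  return age_ms * ms_to_frame_count + frames_buffered_before_packet;
}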
-void WASAPIAudioInputStream::HandleError(HRESULT err) {
- NOTREACHED() << "Error code: " << err;
- if (sink_)
- sink_->OnError(this, static_cast<int>(err));
-}
-
-HRESULT WASAPIAudioInputStream::SetCaptureDevice() {
- ScopedComPtr<IMMDeviceEnumerator> enumerator;
- HRESULT hr = CoCreateInstance(__uuidof(MMDeviceEnumerator),
- NULL,
- CLSCTX_INPROC_SERVER,
- __uuidof(IMMDeviceEnumerator),
- enumerator.ReceiveVoid());
- if (SUCCEEDED(hr)) {
- // Retrieve the IMMDevice by using the specified role or the specified
- // unique endpoint device-identification string.
- // TODO(henrika): possibly add support for the eCommunications as well.
- if (device_id_ == AudioManagerBase::kDefaultDeviceId) {
- // Retrieve the default capture audio endpoint for the specified role.
- // Note that, in Windows Vista, the MMDevice API supports device roles
- // but the system-supplied user interface programs do not.
- hr = enumerator->GetDefaultAudioEndpoint(eCapture,
- eConsole,
- endpoint_device_.Receive());
- } else {
- // Retrieve a capture endpoint device that is specified by an endpoint
- // device-identification string.
- hr = enumerator->GetDevice(UTF8ToUTF16(device_id_).c_str(),
- endpoint_device_.Receive());
- }
-
- if (FAILED(hr))
- return hr;
-
- // Verify that the audio endpoint device is active, i.e., the audio
- // adapter that connects to the endpoint device is present and enabled.
- DWORD state = DEVICE_STATE_DISABLED;
- hr = endpoint_device_->GetState(&state);
- if (SUCCEEDED(hr)) {
- if (!(state & DEVICE_STATE_ACTIVE)) {
- DLOG(ERROR) << "Selected capture device is not active.";
- hr = E_ACCESSDENIED;
- }
- }
- }
-
- return hr;
-}
-
-HRESULT WASAPIAudioInputStream::ActivateCaptureDevice() {
- // Creates and activates an IAudioClient COM object given the selected
- // capture endpoint device.
- HRESULT hr = endpoint_device_->Activate(__uuidof(IAudioClient),
- CLSCTX_INPROC_SERVER,
- NULL,
- audio_client_.ReceiveVoid());
- return hr;
-}
-
-HRESULT WASAPIAudioInputStream::GetAudioEngineStreamFormat() {
- HRESULT hr = S_OK;
-#ifndef NDEBUG
- // The GetMixFormat() method retrieves the stream format that the
- // audio engine uses for its internal processing of shared-mode streams.
- // The method always uses a WAVEFORMATEXTENSIBLE structure, instead
- // of a stand-alone WAVEFORMATEX structure, to specify the format.
- // A WAVEFORMATEXTENSIBLE structure can specify both the mapping of
- // channels to speakers and the number of bits of precision in each sample.
- base::win::ScopedCoMem<WAVEFORMATEXTENSIBLE> format_ex;
- hr = audio_client_->GetMixFormat(
- reinterpret_cast<WAVEFORMATEX**>(&format_ex));
-
- // See http://msdn.microsoft.com/en-us/windows/hardware/gg463006#EFH
- // for details on the WAVE file format.
- WAVEFORMATEX format = format_ex->Format;
- DVLOG(2) << "WAVEFORMATEX:";
- DVLOG(2) << " wFormatTag : 0x" << std::hex << format.wFormatTag;
- DVLOG(2) << " nChannels : " << format.nChannels;
- DVLOG(2) << " nSamplesPerSec : " << format.nSamplesPerSec;
- DVLOG(2) << " nAvgBytesPerSec: " << format.nAvgBytesPerSec;
- DVLOG(2) << " nBlockAlign : " << format.nBlockAlign;
- DVLOG(2) << " wBitsPerSample : " << format.wBitsPerSample;
- DVLOG(2) << " cbSize : " << format.cbSize;
-
- DVLOG(2) << "WAVEFORMATEXTENSIBLE:";
- DVLOG(2) << " wValidBitsPerSample: " <<
- format_ex->Samples.wValidBitsPerSample;
- DVLOG(2) << " dwChannelMask : 0x" << std::hex <<
- format_ex->dwChannelMask;
- if (format_ex->SubFormat == KSDATAFORMAT_SUBTYPE_PCM)
- DVLOG(2) << " SubFormat : KSDATAFORMAT_SUBTYPE_PCM";
- else if (format_ex->SubFormat == KSDATAFORMAT_SUBTYPE_IEEE_FLOAT)
- DVLOG(2) << " SubFormat : KSDATAFORMAT_SUBTYPE_IEEE_FLOAT";
- else if (format_ex->SubFormat == KSDATAFORMAT_SUBTYPE_WAVEFORMATEX)
- DVLOG(2) << " SubFormat : KSDATAFORMAT_SUBTYPE_WAVEFORMATEX";
-#endif
- return hr;
-}
-
-bool WASAPIAudioInputStream::DesiredFormatIsSupported() {
- // An application that uses WASAPI to manage shared-mode streams can rely
- // on the audio engine to perform only limited format conversions. The audio
- // engine can convert between a standard PCM sample size used by the
- // application and the floating-point samples that the engine uses for its
- // internal processing. However, the format for an application stream
- // typically must have the same number of channels and the same sample
- // rate as the stream format used by the device.
- // Many audio devices support both PCM and non-PCM stream formats. However,
- // the audio engine can mix only PCM streams.
- base::win::ScopedCoMem<WAVEFORMATEX> closest_match;
- HRESULT hr = audio_client_->IsFormatSupported(AUDCLNT_SHAREMODE_SHARED,
- &format_,
- &closest_match);
- DLOG_IF(ERROR, hr == S_FALSE) << "Format is not supported "
- << "but a closest match exists.";
- return (hr == S_OK);
-}
-
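For a shared-mode stream the check above typically runs against a plain integer PCM format, since the audio engine can only mix PCM streams. As a hypothetical illustration (the helper below is not part of the original file and assumes the Windows multimedia headers this file already pulls in), a 16-bit PCM WAVEFORMATEX of the kind handed to IAudioClient::IsFormatSupported() can be filled out like this:

// Hypothetical helper, illustrative only.
static WAVEFORMATEX MakePcm16Format(int sample_rate, int channels) {
  WAVEFORMATEX format;
  format.wFormatTag = WAVE_FORMAT_PCM;
  format.nChannels = static_cast<WORD>(channels);
  format.nSamplesPerSec = static_cast<DWORD>(sample_rate);
  format.wBitsPerSample = 16;
  format.nBlockAlign =
      static_cast<WORD>((format.wBitsPerSample / 8) * format.nChannels);
  format.nAvgBytesPerSec = format.nSamplesPerSec * format.nBlockAlign;
  format.cbSize = 0;  // Plain PCM carries no extra format bytes.
  return format;
}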
-HRESULT WASAPIAudioInputStream::InitializeAudioEngine() {
- // Initialize the audio stream between the client and the device.
- // We connect indirectly through the audio engine by using shared mode
- // and WASAPI is initialized in an event driven mode.
- // Note that, |hnsBufferDuration| is set to 0, which ensures that the
- // buffer is never smaller than the minimum buffer size needed to ensure
- // that glitches do not occur between the periodic processing passes.
- // This setting should lead to the lowest possible latency.
- HRESULT hr = audio_client_->Initialize(AUDCLNT_SHAREMODE_SHARED,
- AUDCLNT_STREAMFLAGS_EVENTCALLBACK |
- AUDCLNT_STREAMFLAGS_NOPERSIST,
- 0, // hnsBufferDuration
- 0,
- &format_,
- NULL);
- if (FAILED(hr))
- return hr;
-
- // Retrieve the length of the endpoint buffer shared between the client
- // and the audio engine. The buffer length determines the maximum amount
- // of capture data that the audio engine can read from the endpoint buffer
- // during a single processing pass.
- // A typical value is 960 audio frames <=> 20ms @ 48kHz sample rate.
- hr = audio_client_->GetBufferSize(&endpoint_buffer_size_frames_);
- if (FAILED(hr))
- return hr;
- DVLOG(1) << "endpoint buffer size: " << endpoint_buffer_size_frames_
- << " [frames]";
-
-#ifndef NDEBUG
- // The period between processing passes by the audio engine is fixed for a
- // particular audio endpoint device and represents the smallest processing
- // quantum for the audio engine. This period plus the stream latency between
- // the buffer and endpoint device represents the minimum possible latency
- // that an audio application can achieve.
- // TODO(henrika): possibly remove this section when all parts are ready.
- REFERENCE_TIME device_period_shared_mode = 0;
- REFERENCE_TIME device_period_exclusive_mode = 0;
- HRESULT hr_dbg = audio_client_->GetDevicePeriod(
- &device_period_shared_mode, &device_period_exclusive_mode);
- if (SUCCEEDED(hr_dbg)) {
- DVLOG(1) << "device period: "
- << static_cast<double>(device_period_shared_mode / 10000.0)
- << " [ms]";
- }
-
- REFERENCE_TIME latency = 0;
- hr_dbg = audio_client_->GetStreamLatency(&latency);
- if (SUCCEEDED(hr_dbg)) {
- DVLOG(1) << "stream latency: " << static_cast<double>(latency / 10000.0)
- << " [ms]";
- }
-#endif
-
- // Set the event handle that the audio engine will signal each time
- // a buffer becomes ready to be processed by the client.
- hr = audio_client_->SetEventHandle(audio_samples_ready_event_.Get());
- if (FAILED(hr))
- return hr;
-
- // Get access to the IAudioCaptureClient interface. This interface
- // enables us to read input data from the capture endpoint buffer.
- hr = audio_client_->GetService(__uuidof(IAudioCaptureClient),
- audio_capture_client_.ReceiveVoid());
- if (FAILED(hr))
- return hr;
-
- // Obtain a reference to the ISimpleAudioVolume interface which enables
- // us to control the master volume level of an audio session.
- hr = audio_client_->GetService(__uuidof(ISimpleAudioVolume),
- simple_audio_volume_.ReceiveVoid());
- return hr;
-}
-
-} // namespace media
diff --git a/src/media/audio/win/audio_low_latency_input_win.h b/src/media/audio/win/audio_low_latency_input_win.h
deleted file mode 100644
index e83fc92..0000000
--- a/src/media/audio/win/audio_low_latency_input_win.h
+++ /dev/null
@@ -1,209 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-//
-// Implementation of AudioInputStream for Windows using Windows Core Audio
-// WASAPI for low latency capturing.
-//
-// Overview of operation:
-//
-// - An object of WASAPIAudioInputStream is created by the AudioManager
-// factory.
- // - Next some thread will call Open(); at that point the underlying
-// Core Audio APIs are utilized to create two WASAPI interfaces called
-// IAudioClient and IAudioCaptureClient.
-// - Then some thread will call Start(sink).
-// A thread called "wasapi_capture_thread" is started and this thread listens
-// on an event signal which is set periodically by the audio engine for
-// each recorded data packet. As a result, data samples will be provided
-// to the registered sink.
-// - At some point, a thread will call Stop(), which stops and joins the
-// capture thread and at the same time stops audio streaming.
- // - The same thread that called Stop() will call Close(), where we clean up
- // and notify the audio manager, which will likely destroy this object.
-//
-// Implementation notes:
-//
-// - The minimum supported client is Windows Vista.
-// - This implementation is single-threaded, hence:
-// o Construction and destruction must take place from the same thread.
-// o It is recommended to call all APIs from the same thread as well.
-// - It is recommended to first acquire the native sample rate of the default
-// input device and then use the same rate when creating this object. Use
-// WASAPIAudioInputStream::HardwareSampleRate() to retrieve the sample rate.
-// - Calling Close() also leads to self destruction.
-//
-// Core Audio API details:
-//
-// - Utilized MMDevice interfaces:
-// o IMMDeviceEnumerator
-// o IMMDevice
-// - Utilized WASAPI interfaces:
-// o IAudioClient
-// o IAudioCaptureClient
-// - The stream is initialized in shared mode and the processing of the
-// audio buffer is event driven.
-// - The Multimedia Class Scheduler service (MMCSS) is utilized to boost
-// the priority of the capture thread.
-// - Audio applications that use the MMDevice API and WASAPI typically use
-// the ISimpleAudioVolume interface to manage stream volume levels on a
- // per-session basis. It is also possible to use the IAudioEndpointVolume
-// interface to control the master volume level of an audio endpoint device.
-// This implementation is using the ISimpleAudioVolume interface.
-// MSDN states that "In rare cases, a specialized audio application might
-// require the use of the IAudioEndpointVolume".
-//
-#ifndef MEDIA_AUDIO_WIN_AUDIO_LOW_LATENCY_INPUT_WIN_H_
-#define MEDIA_AUDIO_WIN_AUDIO_LOW_LATENCY_INPUT_WIN_H_
-
-#include <Audioclient.h>
-#include <MMDeviceAPI.h>
-
-#include <string>
-
-#include "base/compiler_specific.h"
-#include "base/threading/non_thread_safe.h"
-#include "base/threading/platform_thread.h"
-#include "base/threading/simple_thread.h"
-#include "base/win/scoped_co_mem.h"
-#include "base/win/scoped_com_initializer.h"
-#include "base/win/scoped_comptr.h"
-#include "base/win/scoped_handle.h"
-#include "media/audio/audio_input_stream_impl.h"
-#include "media/audio/audio_parameters.h"
-#include "media/base/media_export.h"
-
-namespace media {
-
-class AudioManagerWin;
-
-// AudioInputStream implementation using Windows Core Audio APIs.
-class MEDIA_EXPORT WASAPIAudioInputStream
- : public AudioInputStreamImpl,
- public base::DelegateSimpleThread::Delegate,
- NON_EXPORTED_BASE(public base::NonThreadSafe) {
- public:
- // The ctor takes all the usual parameters, plus |manager|, which is the
- // audio manager that is creating this object.
- WASAPIAudioInputStream(AudioManagerWin* manager,
- const AudioParameters& params,
- const std::string& device_id);
- // The dtor is typically called by the AudioManager only and it is usually
- // triggered by calling AudioInputStream::Close().
- virtual ~WASAPIAudioInputStream();
-
- // Implementation of AudioInputStream.
- virtual bool Open() OVERRIDE;
- virtual void Start(AudioInputCallback* callback) OVERRIDE;
- virtual void Stop() OVERRIDE;
- virtual void Close() OVERRIDE;
- virtual double GetMaxVolume() OVERRIDE;
- virtual void SetVolume(double volume) OVERRIDE;
- virtual double GetVolume() OVERRIDE;
-
- // Retrieves the sample rate used by the audio engine for its internal
- // processing/mixing of shared-mode streams given a specified device.
- static int HardwareSampleRate(const std::string& device_id);
-
- // Retrieves the number of audio channels used by the audio engine for its
- // internal processing/mixing of shared-mode streams given a specified device.
- static uint32 HardwareChannelCount(const std::string& device_id);
-
- bool started() const { return started_; }
-
- private:
- // DelegateSimpleThread::Delegate implementation.
- virtual void Run() OVERRIDE;
-
- // Issues the OnError() callback to the |sink_|.
- void HandleError(HRESULT err);
-
- // The Open() method is divided into these sub methods.
- HRESULT SetCaptureDevice();
- HRESULT ActivateCaptureDevice();
- HRESULT GetAudioEngineStreamFormat();
- bool DesiredFormatIsSupported();
- HRESULT InitializeAudioEngine();
-
- // Retrieves the stream format that the audio engine uses for its internal
- // processing/mixing of shared-mode streams.
- static HRESULT GetMixFormat(const std::string& device_id,
- WAVEFORMATEX** device_format);
-
- // Our creator, the audio manager, needs to be notified when we close.
- AudioManagerWin* manager_;
-
- // Capturing is driven by this thread (which has no message loop).
- // All OnData() callbacks will be called from this thread.
- base::DelegateSimpleThread* capture_thread_;
-
- // Contains the desired audio format which is set up at construction.
- WAVEFORMATEX format_;
-
- bool opened_;
- bool started_;
-
- // Size in bytes of each audio frame (4 bytes for 16-bit stereo PCM)
- size_t frame_size_;
-
- // Size in audio frames of each audio packet where an audio packet
- // is defined as the block of data which the user received in each
- // OnData() callback.
- size_t packet_size_frames_;
-
- // Size in bytes of each audio packet.
- size_t packet_size_bytes_;
-
- // Length of the audio endpoint buffer.
- size_t endpoint_buffer_size_frames_;
-
- // Contains the unique name of the selected endpoint device.
- // Note that AudioManagerBase::kDefaultDeviceId represents the default
- // device role and is not a valid ID as such.
- std::string device_id_;
-
- // Conversion factor used in delay-estimation calculations.
- // Converts a raw performance counter value to 100-nanosecond units.
- double perf_count_to_100ns_units_;
-
- // Conversion factor used in delay-estimation calculations.
- // Converts from milliseconds to audio frames.
- double ms_to_frame_count_;
-
- // Pointer to the object that will receive the recorded audio samples.
- AudioInputCallback* sink_;
-
- // Windows Multimedia Device (MMDevice) API interfaces.
-
- // An IMMDevice interface which represents an audio endpoint device.
- base::win::ScopedComPtr<IMMDevice> endpoint_device_;
-
- // Windows Audio Session API (WASAPI) interfaces.
-
- // An IAudioClient interface which enables a client to create and initialize
- // an audio stream between an audio application and the audio engine.
- base::win::ScopedComPtr<IAudioClient> audio_client_;
-
- // The IAudioCaptureClient interface enables a client to read input data
- // from a capture endpoint buffer.
- base::win::ScopedComPtr<IAudioCaptureClient> audio_capture_client_;
-
- // The ISimpleAudioVolume interface enables a client to control the
- // master volume level of an audio session.
- // The volume-level is a value in the range 0.0 to 1.0.
- // This interface only works with shared-mode streams.
- base::win::ScopedComPtr<ISimpleAudioVolume> simple_audio_volume_;
-
- // The audio engine will signal this event each time a buffer has been
- // recorded.
- base::win::ScopedHandle audio_samples_ready_event_;
-
- // This event will be signaled when capturing shall stop.
- base::win::ScopedHandle stop_capture_event_;
-
- DISALLOW_COPY_AND_ASSIGN(WASAPIAudioInputStream);
-};
-
-} // namespace media
-
-#endif // MEDIA_AUDIO_WIN_AUDIO_LOW_LATENCY_INPUT_WIN_H_
diff --git a/src/media/audio/win/audio_low_latency_input_win_unittest.cc b/src/media/audio/win/audio_low_latency_input_win_unittest.cc
deleted file mode 100644
index 6eaa352..0000000
--- a/src/media/audio/win/audio_low_latency_input_win_unittest.cc
+++ /dev/null
@@ -1,405 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include <windows.h>
-#include <mmsystem.h>
-
-#include "base/basictypes.h"
-#include "base/environment.h"
-#include "base/file_util.h"
-#include "base/memory/scoped_ptr.h"
-#include "base/message_loop.h"
-#include "base/path_service.h"
-#include "base/test/test_timeouts.h"
-#include "base/win/scoped_com_initializer.h"
-#include "media/audio/audio_io.h"
-#include "media/audio/audio_manager_base.h"
-#include "media/audio/win/audio_low_latency_input_win.h"
-#include "media/audio/win/core_audio_util_win.h"
-#include "media/base/seekable_buffer.h"
-#include "testing/gmock/include/gmock/gmock.h"
-#include "testing/gtest/include/gtest/gtest.h"
-
-using base::win::ScopedCOMInitializer;
-using ::testing::_;
-using ::testing::AnyNumber;
-using ::testing::AtLeast;
-using ::testing::Gt;
-using ::testing::NotNull;
-
-namespace media {
-
-ACTION_P3(CheckCountAndPostQuitTask, count, limit, loop) {
- if (++*count >= limit) {
- loop->PostTask(FROM_HERE, MessageLoop::QuitClosure());
- }
-}
-
-class MockAudioInputCallback : public AudioInputStream::AudioInputCallback {
- public:
- MOCK_METHOD5(OnData, void(AudioInputStream* stream,
- const uint8* src, uint32 size,
- uint32 hardware_delay_bytes, double volume));
- MOCK_METHOD1(OnClose, void(AudioInputStream* stream));
- MOCK_METHOD2(OnError, void(AudioInputStream* stream, int code));
-};
-
-// This audio sink implementation should be used for manual tests only since
- // the recorded data is written to a raw binary file.
-class WriteToFileAudioSink : public AudioInputStream::AudioInputCallback {
- public:
- // Allocate space for ~10 seconds of data @ 48kHz in stereo:
- // 2 bytes per sample, 2 channels, 48000 samples per second, 10 seconds <=> 1920000 bytes.
- static const size_t kMaxBufferSize = 2 * 2 * 480 * 100 * 10;
-
- explicit WriteToFileAudioSink(const char* file_name)
- : buffer_(0, kMaxBufferSize),
- bytes_to_write_(0) {
- FilePath file_path;
- EXPECT_TRUE(PathService::Get(base::DIR_EXE, &file_path));
- file_path = file_path.AppendASCII(file_name);
- binary_file_ = file_util::OpenFile(file_path, "wb");
- DLOG_IF(ERROR, !binary_file_) << "Failed to open binary PCM data file.";
- LOG(INFO) << ">> Output file: " << file_path.value()
- << " has been created.";
- }
-
- virtual ~WriteToFileAudioSink() {
- size_t bytes_written = 0;
- while (bytes_written < bytes_to_write_) {
- const uint8* chunk;
- int chunk_size;
-
- // Stop writing if no more data is available.
- if (!buffer_.GetCurrentChunk(&chunk, &chunk_size))
- break;
-
- // Write recorded data chunk to the file and prepare for next chunk.
- fwrite(chunk, 1, chunk_size, binary_file_);
- buffer_.Seek(chunk_size);
- bytes_written += chunk_size;
- }
- file_util::CloseFile(binary_file_);
- }
-
- // AudioInputStream::AudioInputCallback implementation.
- virtual void OnData(AudioInputStream* stream,
- const uint8* src,
- uint32 size,
- uint32 hardware_delay_bytes,
- double volume) {
- // Store data in a temporary buffer to avoid making blocking
- // fwrite() calls in the audio callback. The complete buffer will be
- // written to file in the destructor.
- if (buffer_.Append(src, size)) {
- bytes_to_write_ += size;
- }
- }
-
- virtual void OnClose(AudioInputStream* stream) {}
- virtual void OnError(AudioInputStream* stream, int code) {}
-
- private:
- media::SeekableBuffer buffer_;
- FILE* binary_file_;
- size_t bytes_to_write_;
-};
-
-// Convenience method which ensures that we are not running on the build
-// bots and that at least one valid input device can be found. We also
-// verify that we are not running on XP since the low-latency (WASAPI-
-// based) version requires Windows Vista or higher.
-static bool CanRunAudioTests(AudioManager* audio_man) {
- if (!CoreAudioUtil::IsSupported()) {
- LOG(WARNING) << "This test requires Windows Vista or higher.";
- return false;
- }
- // TODO(henrika): note that we use Wave today to query the number of
- // existing input devices.
- bool input = audio_man->HasAudioInputDevices();
- LOG_IF(WARNING, !input) << "No input device detected.";
- return input;
-}
-
- // Convenience class which creates a default AudioInputStream object but
-// also allows the user to modify the default settings.
-class AudioInputStreamWrapper {
- public:
- explicit AudioInputStreamWrapper(AudioManager* audio_manager)
- : com_init_(ScopedCOMInitializer::kMTA),
- audio_man_(audio_manager),
- format_(AudioParameters::AUDIO_PCM_LOW_LATENCY),
- channel_layout_(CHANNEL_LAYOUT_STEREO),
- bits_per_sample_(16) {
- // Use native/mixing sample rate and 10ms frame size as default.
- sample_rate_ = static_cast<int>(
- WASAPIAudioInputStream::HardwareSampleRate(
- AudioManagerBase::kDefaultDeviceId));
- samples_per_packet_ = sample_rate_ / 100;
- }
-
- ~AudioInputStreamWrapper() {}
-
- // Creates AudioInputStream object using default parameters.
- AudioInputStream* Create() {
- return CreateInputStream();
- }
-
- // Creates AudioInputStream object using non-default parameters where the
- // frame size is modified.
- AudioInputStream* Create(int samples_per_packet) {
- samples_per_packet_ = samples_per_packet;
- return CreateInputStream();
- }
-
- AudioParameters::Format format() const { return format_; }
- int channels() const {
- return ChannelLayoutToChannelCount(channel_layout_);
- }
- int bits_per_sample() const { return bits_per_sample_; }
- int sample_rate() const { return sample_rate_; }
- int samples_per_packet() const { return samples_per_packet_; }
-
- private:
- AudioInputStream* CreateInputStream() {
- AudioInputStream* ais = audio_man_->MakeAudioInputStream(
- AudioParameters(format_, channel_layout_, sample_rate_,
- bits_per_sample_, samples_per_packet_),
- AudioManagerBase::kDefaultDeviceId);
- EXPECT_TRUE(ais);
- return ais;
- }
-
- ScopedCOMInitializer com_init_;
- AudioManager* audio_man_;
- AudioParameters::Format format_;
- ChannelLayout channel_layout_;
- int bits_per_sample_;
- int sample_rate_;
- int samples_per_packet_;
-};
-
-// Convenience method which creates a default AudioInputStream object.
-static AudioInputStream* CreateDefaultAudioInputStream(
- AudioManager* audio_manager) {
- AudioInputStreamWrapper aisw(audio_manager);
- AudioInputStream* ais = aisw.Create();
- return ais;
-}
-
-// Verify that we can retrieve the current hardware/mixing sample rate
-// for all available input devices.
-TEST(WinAudioInputTest, WASAPIAudioInputStreamHardwareSampleRate) {
- scoped_ptr<AudioManager> audio_manager(AudioManager::Create());
- if (!CanRunAudioTests(audio_manager.get()))
- return;
-
- ScopedCOMInitializer com_init(ScopedCOMInitializer::kMTA);
-
- // Retrieve a list of all available input devices.
- media::AudioDeviceNames device_names;
- audio_manager->GetAudioInputDeviceNames(&device_names);
-
- // Scan all available input devices and repeat the same test for all of them.
- for (media::AudioDeviceNames::const_iterator it = device_names.begin();
- it != device_names.end(); ++it) {
- // Retrieve the hardware sample rate given a specified audio input device.
- // TODO(tommi): ensure that we don't have to cast here.
- int fs = static_cast<int>(WASAPIAudioInputStream::HardwareSampleRate(
- it->unique_id));
- EXPECT_GE(fs, 0);
- }
-}
-
-// Test Create(), Close() calling sequence.
-TEST(WinAudioInputTest, WASAPIAudioInputStreamCreateAndClose) {
- scoped_ptr<AudioManager> audio_manager(AudioManager::Create());
- if (!CanRunAudioTests(audio_manager.get()))
- return;
- AudioInputStream* ais = CreateDefaultAudioInputStream(audio_manager.get());
- ais->Close();
-}
-
-// Test Open(), Close() calling sequence.
-TEST(WinAudioInputTest, WASAPIAudioInputStreamOpenAndClose) {
- scoped_ptr<AudioManager> audio_manager(AudioManager::Create());
- if (!CanRunAudioTests(audio_manager.get()))
- return;
- AudioInputStream* ais = CreateDefaultAudioInputStream(audio_manager.get());
- EXPECT_TRUE(ais->Open());
- ais->Close();
-}
-
-// Test Open(), Start(), Close() calling sequence.
-TEST(WinAudioInputTest, WASAPIAudioInputStreamOpenStartAndClose) {
- scoped_ptr<AudioManager> audio_manager(AudioManager::Create());
- if (!CanRunAudioTests(audio_manager.get()))
- return;
- AudioInputStream* ais = CreateDefaultAudioInputStream(audio_manager.get());
- EXPECT_TRUE(ais->Open());
- MockAudioInputCallback sink;
- ais->Start(&sink);
- EXPECT_CALL(sink, OnClose(ais))
- .Times(1);
- ais->Close();
-}
-
-// Test Open(), Start(), Stop(), Close() calling sequence.
-TEST(WinAudioInputTest, WASAPIAudioInputStreamOpenStartStopAndClose) {
- scoped_ptr<AudioManager> audio_manager(AudioManager::Create());
- if (!CanRunAudioTests(audio_manager.get()))
- return;
- AudioInputStream* ais = CreateDefaultAudioInputStream(audio_manager.get());
- EXPECT_TRUE(ais->Open());
- MockAudioInputCallback sink;
- ais->Start(&sink);
- ais->Stop();
- EXPECT_CALL(sink, OnClose(ais))
- .Times(1);
- ais->Close();
-}
-
-// Test some additional calling sequences.
-TEST(WinAudioInputTest, WASAPIAudioInputStreamMiscCallingSequences) {
- scoped_ptr<AudioManager> audio_manager(AudioManager::Create());
- if (!CanRunAudioTests(audio_manager.get()))
- return;
- AudioInputStream* ais = CreateDefaultAudioInputStream(audio_manager.get());
- WASAPIAudioInputStream* wais = static_cast<WASAPIAudioInputStream*>(ais);
-
- // Open(), Open() should fail the second time.
- EXPECT_TRUE(ais->Open());
- EXPECT_FALSE(ais->Open());
-
- MockAudioInputCallback sink;
-
- // Start(), Start() is a valid calling sequence (second call does nothing).
- ais->Start(&sink);
- EXPECT_TRUE(wais->started());
- ais->Start(&sink);
- EXPECT_TRUE(wais->started());
-
- // Stop(), Stop() is a valid calling sequence (second call does nothing).
- ais->Stop();
- EXPECT_FALSE(wais->started());
- ais->Stop();
- EXPECT_FALSE(wais->started());
-
- EXPECT_CALL(sink, OnClose(ais))
- .Times(1);
- ais->Close();
-}
-
-TEST(WinAudioInputTest, WASAPIAudioInputStreamTestPacketSizes) {
- scoped_ptr<AudioManager> audio_manager(AudioManager::Create());
- if (!CanRunAudioTests(audio_manager.get()))
- return;
-
- int count = 0;
- MessageLoopForUI loop;
-
- // 10 ms packet size.
-
- // Create default WASAPI input stream which records in stereo using
- // the shared mixing rate. The default buffer size is 10ms.
- AudioInputStreamWrapper aisw(audio_manager.get());
- AudioInputStream* ais = aisw.Create();
- EXPECT_TRUE(ais->Open());
-
- MockAudioInputCallback sink;
-
- // Derive the expected size in bytes of each recorded packet.
- uint32 bytes_per_packet = aisw.channels() * aisw.samples_per_packet() *
- (aisw.bits_per_sample() / 8);
-
- // We use 10ms packets and will run the test until ten packets are received.
- // All should contain valid packets of the same size and a valid delay
- // estimate.
- EXPECT_CALL(sink, OnData(
- ais, NotNull(), bytes_per_packet, Gt(bytes_per_packet), _))
- .Times(AtLeast(10))
- .WillRepeatedly(CheckCountAndPostQuitTask(&count, 10, &loop));
- ais->Start(&sink);
- loop.Run();
- ais->Stop();
-
- // Store current packet size (to be used in the subsequent tests).
- int samples_per_packet_10ms = aisw.samples_per_packet();
-
- EXPECT_CALL(sink, OnClose(ais))
- .Times(1);
- ais->Close();
-
- // 20 ms packet size.
-
- count = 0;
- ais = aisw.Create(2 * samples_per_packet_10ms);
- EXPECT_TRUE(ais->Open());
- bytes_per_packet = aisw.channels() * aisw.samples_per_packet() *
- (aisw.bits_per_sample() / 8);
-
- EXPECT_CALL(sink, OnData(
- ais, NotNull(), bytes_per_packet, Gt(bytes_per_packet), _))
- .Times(AtLeast(10))
- .WillRepeatedly(CheckCountAndPostQuitTask(&count, 10, &loop));
- ais->Start(&sink);
- loop.Run();
- ais->Stop();
-
- EXPECT_CALL(sink, OnClose(ais))
- .Times(1);
- ais->Close();
-
- // 5 ms packet size.
-
- count = 0;
- ais = aisw.Create(samples_per_packet_10ms / 2);
- EXPECT_TRUE(ais->Open());
- bytes_per_packet = aisw.channels() * aisw.samples_per_packet() *
- (aisw.bits_per_sample() / 8);
-
- EXPECT_CALL(sink, OnData(
- ais, NotNull(), bytes_per_packet, Gt(bytes_per_packet), _))
- .Times(AtLeast(10))
- .WillRepeatedly(CheckCountAndPostQuitTask(&count, 10, &loop));
- ais->Start(&sink);
- loop.Run();
- ais->Stop();
-
- EXPECT_CALL(sink, OnClose(ais))
- .Times(1);
- ais->Close();
-}
-
-// This test is intended for manual tests and should only be enabled
-// when it is required to store the captured data on a local file.
-// By default, GTest will print out YOU HAVE 1 DISABLED TEST.
-// To include disabled tests in test execution, just invoke the test program
-// with --gtest_also_run_disabled_tests or set the GTEST_ALSO_RUN_DISABLED_TESTS
-// environment variable to a value greater than 0.
-TEST(WinAudioInputTest, DISABLED_WASAPIAudioInputStreamRecordToFile) {
- scoped_ptr<AudioManager> audio_manager(AudioManager::Create());
- if (!CanRunAudioTests(audio_manager.get()))
- return;
-
- // Name of the output PCM file containing captured data. The output file
- // will be stored in the directory containing 'media_unittests.exe'.
- // Example of full name: \src\build\Debug\out_stereo_10sec.pcm.
- const char* file_name = "out_stereo_10sec.pcm";
-
- AudioInputStreamWrapper aisw(audio_manager.get());
- AudioInputStream* ais = aisw.Create();
- EXPECT_TRUE(ais->Open());
-
- LOG(INFO) << ">> Sample rate: " << aisw.sample_rate() << " [Hz]";
- WriteToFileAudioSink file_sink(file_name);
- LOG(INFO) << ">> Speak into the default microphone while recording.";
- ais->Start(&file_sink);
- base::PlatformThread::Sleep(TestTimeouts::action_timeout());
- ais->Stop();
- LOG(INFO) << ">> Recording has stopped.";
- ais->Close();
-}
-
-} // namespace media
diff --git a/src/media/audio/win/audio_low_latency_output_win.cc b/src/media/audio/win/audio_low_latency_output_win.cc
deleted file mode 100644
index 3037589..0000000
--- a/src/media/audio/win/audio_low_latency_output_win.cc
+++ /dev/null
@@ -1,956 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "media/audio/win/audio_low_latency_output_win.h"
-
-#include <Functiondiscoverykeys_devpkey.h>
-
-#include "base/command_line.h"
-#include "base/logging.h"
-#include "base/memory/scoped_ptr.h"
-#include "base/metrics/histogram.h"
-#include "base/utf_string_conversions.h"
-#include "media/audio/audio_util.h"
-#include "media/audio/win/audio_manager_win.h"
-#include "media/audio/win/avrt_wrapper_win.h"
-#include "media/base/limits.h"
-#include "media/base/media_switches.h"
-
-using base::win::ScopedComPtr;
-using base::win::ScopedCOMInitializer;
-using base::win::ScopedCoMem;
-
-namespace media {
-
-typedef uint32 ChannelConfig;
-
-// Retrieves the stream format that the audio engine uses for its internal
-// processing/mixing of shared-mode streams.
-static HRESULT GetMixFormat(ERole device_role, WAVEFORMATEX** device_format) {
- // Note that we are using the IAudioClient::GetMixFormat() API to get the
- // device format in this function. It is in fact possible to be "more native",
- // and ask the endpoint device directly for its properties. Given a reference
- // to the IMMDevice interface of an endpoint object, a client can obtain a
- // reference to the endpoint object's property store by calling the
- // IMMDevice::OpenPropertyStore() method. However, I have not been able to
- // access any valuable information using this method on my HP Z600 desktop,
- // hence it feels more appropriate to use the IAudioClient::GetMixFormat()
- // approach instead.
-
- // Calling this function only makes sense for shared mode streams, since
- // if the device will be opened in exclusive mode, then the application
- // specified format is used instead. However, the result of this method can
- // be useful for testing purposes so we don't DCHECK here.
- DLOG_IF(WARNING, WASAPIAudioOutputStream::GetShareMode() ==
- AUDCLNT_SHAREMODE_EXCLUSIVE) <<
- "The mixing sample rate will be ignored for exclusive-mode streams.";
-
- // It is assumed that this static method is called from a COM thread, i.e.,
- // CoInitializeEx() is not called here again to avoid STA/MTA conflicts.
- ScopedComPtr<IMMDeviceEnumerator> enumerator;
- HRESULT hr = CoCreateInstance(__uuidof(MMDeviceEnumerator),
- NULL,
- CLSCTX_INPROC_SERVER,
- __uuidof(IMMDeviceEnumerator),
- enumerator.ReceiveVoid());
- if (FAILED(hr))
- return hr;
-
- ScopedComPtr<IMMDevice> endpoint_device;
- hr = enumerator->GetDefaultAudioEndpoint(eRender,
- device_role,
- endpoint_device.Receive());
- if (FAILED(hr))
- return hr;
-
- ScopedComPtr<IAudioClient> audio_client;
- hr = endpoint_device->Activate(__uuidof(IAudioClient),
- CLSCTX_INPROC_SERVER,
- NULL,
- audio_client.ReceiveVoid());
- return SUCCEEDED(hr) ? audio_client->GetMixFormat(device_format) : hr;
-}
-
-// Retrieves an integer mask which corresponds to the channel layout the
-// audio engine uses for its internal processing/mixing of shared-mode
-// streams. This mask indicates which channels are present in the multi-
-// channel stream. The least significant bit corresponds with the Front Left
-// speaker, the next least significant bit corresponds to the Front Right
-// speaker, and so on, continuing in the order defined in KsMedia.h.
-// See http://msdn.microsoft.com/en-us/library/windows/hardware/ff537083(v=vs.85).aspx
-// for more details.
-static ChannelConfig GetChannelConfig() {
- // Use a WAVEFORMATEXTENSIBLE structure since it can specify both the
- // number of channels and the mapping of channels to speakers for
- // multichannel devices.
- base::win::ScopedCoMem<WAVEFORMATPCMEX> format_ex;
- HRESULT hr = S_FALSE;
- hr = GetMixFormat(eConsole, reinterpret_cast<WAVEFORMATEX**>(&format_ex));
- if (FAILED(hr))
- return 0;
-
- // The dwChannelMask member specifies which channels are present in the
- // multichannel stream. The least significant bit corresponds to the
- // front left speaker, the next least significant bit corresponds to the
- // front right speaker, and so on.
- // See http://msdn.microsoft.com/en-us/library/windows/desktop/dd757714(v=vs.85).aspx
- // for more details on the channel mapping.
- DVLOG(2) << "dwChannelMask: 0x" << std::hex << format_ex->dwChannelMask;
-
-#if !defined(NDEBUG)
- // See http://en.wikipedia.org/wiki/Surround_sound for more details on
- // how to name various speaker configurations. The list below is not complete.
- const char* speaker_config = "Undefined";
- switch (format_ex->dwChannelMask) {
- case KSAUDIO_SPEAKER_MONO:
- speaker_config = "Mono";
- break;
- case KSAUDIO_SPEAKER_STEREO:
- speaker_config = "Stereo";
- break;
- case KSAUDIO_SPEAKER_5POINT1_SURROUND:
- speaker_config = "5.1 surround";
- break;
- case KSAUDIO_SPEAKER_5POINT1:
- speaker_config = "5.1";
- break;
- case KSAUDIO_SPEAKER_7POINT1_SURROUND:
- speaker_config = "7.1 surround";
- break;
- case KSAUDIO_SPEAKER_7POINT1:
- speaker_config = "7.1";
- break;
- default:
- break;
- }
- DVLOG(2) << "speaker configuration: " << speaker_config;
-#endif
-
- return static_cast<ChannelConfig>(format_ex->dwChannelMask);
-}
-
-// Converts Microsoft's channel configuration to ChannelLayout.
-// This mapping is not perfect but the best we can do given the current
-// ChannelLayout enumerator and the Windows-specific speaker configurations
-// defined in ksmedia.h. Don't assume that the channel ordering in
-// ChannelLayout is exactly the same as the Windows specific configuration.
-// As an example: KSAUDIO_SPEAKER_7POINT1_SURROUND is mapped to
-// CHANNEL_LAYOUT_7_1 but the positions of Back L, Back R and Side L, Side R
-// speakers are different in these two definitions.
-static ChannelLayout ChannelConfigToChannelLayout(ChannelConfig config) {
- switch (config) {
- case KSAUDIO_SPEAKER_DIRECTOUT:
- return CHANNEL_LAYOUT_NONE;
- case KSAUDIO_SPEAKER_MONO:
- return CHANNEL_LAYOUT_MONO;
- case KSAUDIO_SPEAKER_STEREO:
- return CHANNEL_LAYOUT_STEREO;
- case KSAUDIO_SPEAKER_QUAD:
- return CHANNEL_LAYOUT_QUAD;
- case KSAUDIO_SPEAKER_SURROUND:
- return CHANNEL_LAYOUT_4_0;
- case KSAUDIO_SPEAKER_5POINT1:
- return CHANNEL_LAYOUT_5_1_BACK;
- case KSAUDIO_SPEAKER_5POINT1_SURROUND:
- return CHANNEL_LAYOUT_5_1;
- case KSAUDIO_SPEAKER_7POINT1:
- return CHANNEL_LAYOUT_7_1_WIDE;
- case KSAUDIO_SPEAKER_7POINT1_SURROUND:
- return CHANNEL_LAYOUT_7_1;
- default:
- DVLOG(1) << "Unsupported channel layout: " << config;
- return CHANNEL_LAYOUT_UNSUPPORTED;
- }
-}
-
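Because each set bit in the mask marks exactly one speaker position, the channel count of a well-formed WAVEFORMATEXTENSIBLE can be recovered by counting the bits in dwChannelMask, which is how the mask relates to the nChannels value logged elsewhere in this file. A small illustrative helper, not part of the original file:

// Illustrative only: number of channels implied by a channel-config mask.
static int ChannelCountFromConfig(ChannelConfig config) {
  int channels = 0;
  for (ChannelConfig mask = config; mask != 0; mask >>= 1)
    channels += static_cast<int>(mask & 1);
  return channels;
}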
-// static
-AUDCLNT_SHAREMODE WASAPIAudioOutputStream::GetShareMode() {
- const CommandLine* cmd_line = CommandLine::ForCurrentProcess();
- if (cmd_line->HasSwitch(switches::kEnableExclusiveAudio))
- return AUDCLNT_SHAREMODE_EXCLUSIVE;
- return AUDCLNT_SHAREMODE_SHARED;
-}
-
-WASAPIAudioOutputStream::WASAPIAudioOutputStream(AudioManagerWin* manager,
- const AudioParameters& params,
- ERole device_role)
- : creating_thread_id_(base::PlatformThread::CurrentId()),
- manager_(manager),
- opened_(false),
- restart_rendering_mode_(false),
- volume_(1.0),
- endpoint_buffer_size_frames_(0),
- device_role_(device_role),
- share_mode_(GetShareMode()),
- client_channel_count_(params.channels()),
- num_written_frames_(0),
- source_(NULL),
- audio_bus_(AudioBus::Create(params)) {
- DCHECK(manager_);
-
- // Load the Avrt DLL if not already loaded. Required to support MMCSS.
- bool avrt_init = avrt::Initialize();
- DCHECK(avrt_init) << "Failed to load the avrt.dll";
-
- if (share_mode_ == AUDCLNT_SHAREMODE_EXCLUSIVE) {
- VLOG(1) << ">> Note that EXCLUSIVE MODE is enabled <<";
- }
-
- // Set up the desired render format specified by the client. We use the
- // WAVE_FORMAT_EXTENSIBLE structure to ensure that multiple channel ordering
- // and high precision data can be supported.
-
- // Begin with the WAVEFORMATEX structure that specifies the basic format.
- WAVEFORMATEX* format = &format_.Format;
- format->wFormatTag = WAVE_FORMAT_EXTENSIBLE;
- format->nChannels = client_channel_count_;
- format->nSamplesPerSec = params.sample_rate();
- format->wBitsPerSample = params.bits_per_sample();
- format->nBlockAlign = (format->wBitsPerSample / 8) * format->nChannels;
- format->nAvgBytesPerSec = format->nSamplesPerSec * format->nBlockAlign;
- format->cbSize = sizeof(WAVEFORMATEXTENSIBLE) - sizeof(WAVEFORMATEX);
-
- // Add the parts which are unique to WAVE_FORMAT_EXTENSIBLE.
- format_.Samples.wValidBitsPerSample = params.bits_per_sample();
- format_.dwChannelMask = GetChannelConfig();
- format_.SubFormat = KSDATAFORMAT_SUBTYPE_PCM;
-
- // Size in bytes of each audio frame.
- frame_size_ = format->nBlockAlign;
-
- // Store size (in different units) of audio packets which we expect to
- // get from the audio endpoint device in each render event.
- packet_size_frames_ = params.GetBytesPerBuffer() / format->nBlockAlign;
- packet_size_bytes_ = params.GetBytesPerBuffer();
- packet_size_ms_ = (1000.0 * packet_size_frames_) / params.sample_rate();
- DVLOG(1) << "Number of bytes per audio frame : " << frame_size_;
- DVLOG(1) << "Number of audio frames per packet: " << packet_size_frames_;
- DVLOG(1) << "Number of bytes per packet : " << packet_size_bytes_;
- DVLOG(1) << "Number of milliseconds per packet: " << packet_size_ms_;
-
- // All events are auto-reset events and non-signaled initially.
-
- // Create the event which the audio engine will signal each time
- // a buffer becomes ready to be processed by the client.
- audio_samples_render_event_.Set(CreateEvent(NULL, FALSE, FALSE, NULL));
- DCHECK(audio_samples_render_event_.IsValid());
-
- // Create the event which will be set in Stop() when rendering shall stop.
- stop_render_event_.Set(CreateEvent(NULL, FALSE, FALSE, NULL));
- DCHECK(stop_render_event_.IsValid());
-}
-
-WASAPIAudioOutputStream::~WASAPIAudioOutputStream() {}
-
-bool WASAPIAudioOutputStream::Open() {
- DCHECK_EQ(GetCurrentThreadId(), creating_thread_id_);
- if (opened_)
- return true;
-
- // Channel mixing is not supported; it must be handled by ChannelMixer.
- if (format_.Format.nChannels != client_channel_count_) {
- LOG(ERROR) << "Channel down-mixing is not supported.";
- return false;
- }
-
- // Create an IMMDeviceEnumerator interface and obtain a reference to
- // the IMMDevice interface of the default rendering device with the
- // specified role.
- HRESULT hr = SetRenderDevice();
- if (FAILED(hr)) {
- return false;
- }
-
- // Obtain an IAudioClient interface which enables us to create and initialize
- // an audio stream between an audio application and the audio engine.
- hr = ActivateRenderDevice();
- if (FAILED(hr)) {
- return false;
- }
-
- // Verify that the selected audio endpoint supports the specified format
- // set during construction.
- // In exclusive mode, the client can choose to open the stream in any audio
- // format that the endpoint device supports. In shared mode, the client must
- // open the stream in the mix format that is currently in use by the audio
- // engine (or a format that is similar to the mix format). The audio engine's
- // input streams and the output mix from the engine are all in this format.
- if (!DesiredFormatIsSupported()) {
- return false;
- }
-
- // Initialize the audio stream between the client and the device using
- // shared or exclusive mode and a lowest possible glitch-free latency.
- // We will enter different code paths depending on the specified share mode.
- hr = InitializeAudioEngine();
- if (FAILED(hr)) {
- return false;
- }
-
- opened_ = true;
- return true;
-}
-
-void WASAPIAudioOutputStream::Start(AudioSourceCallback* callback) {
- DCHECK_EQ(GetCurrentThreadId(), creating_thread_id_);
- CHECK(callback);
- CHECK(opened_);
-
- if (render_thread_.get()) {
- CHECK_EQ(callback, source_);
- return;
- }
-
- if (restart_rendering_mode_) {
- // The selected audio device has been removed or disabled and a new
- // default device has been enabled instead. The current implementation
- // does not support this sequence of events. Given that Open()
- // and Start() are usually called in one sequence, it should be a very
- // rare event.
- // TODO(henrika): it is possible to extend the functionality here.
- LOG(ERROR) << "Unable to start since the selected default device has "
- "changed since Open() was called.";
- return;
- }
-
- source_ = callback;
-
- // Avoid start-up glitches by filling up the endpoint buffer with "silence"
- // before starting the stream.
- BYTE* data_ptr = NULL;
- HRESULT hr = audio_render_client_->GetBuffer(endpoint_buffer_size_frames_,
- &data_ptr);
- if (FAILED(hr)) {
- DLOG(ERROR) << "Failed to use rendering audio buffer: " << std::hex << hr;
- return;
- }
-
- // Using the AUDCLNT_BUFFERFLAGS_SILENT flag eliminates the need to
- // explicitly write silence data to the rendering buffer.
- audio_render_client_->ReleaseBuffer(endpoint_buffer_size_frames_,
- AUDCLNT_BUFFERFLAGS_SILENT);
- num_written_frames_ = endpoint_buffer_size_frames_;
-
- // Sanity check: verify that the endpoint buffer is filled with silence.
- UINT32 num_queued_frames = 0;
- audio_client_->GetCurrentPadding(&num_queued_frames);
- DCHECK(num_queued_frames == num_written_frames_);
-
- // Create and start the thread that will drive the rendering by waiting for
- // render events.
- render_thread_.reset(
- new base::DelegateSimpleThread(this, "wasapi_render_thread"));
- render_thread_->Start();
-
- // Start streaming data between the endpoint buffer and the audio engine.
- hr = audio_client_->Start();
- if (FAILED(hr)) {
- SetEvent(stop_render_event_.Get());
- render_thread_->Join();
- render_thread_.reset();
- HandleError(hr);
- }
-}
-
-void WASAPIAudioOutputStream::Stop() {
- DCHECK_EQ(GetCurrentThreadId(), creating_thread_id_);
- if (!render_thread_.get())
- return;
-
- // Stop output audio streaming.
- HRESULT hr = audio_client_->Stop();
- if (FAILED(hr)) {
- DLOG_IF(ERROR, hr != AUDCLNT_E_NOT_INITIALIZED)
- << "Failed to stop output streaming: " << std::hex << hr;
- }
-
- // Wait until the thread completes and perform cleanup.
- SetEvent(stop_render_event_.Get());
- render_thread_->Join();
- render_thread_.reset();
-
- // Ensure that we don't quit the main thread loop immediately next
- // time Start() is called.
- ResetEvent(stop_render_event_.Get());
-
- // Clear source callback, it'll be set again on the next Start() call.
- source_ = NULL;
-
- // Flush all pending data and reset the audio clock stream position to 0.
- hr = audio_client_->Reset();
- if (FAILED(hr)) {
- DLOG_IF(ERROR, hr != AUDCLNT_E_NOT_INITIALIZED)
- << "Failed to reset streaming: " << std::hex << hr;
- }
-
- // Extra safety check to ensure that the buffers are cleared.
- // If the buffers are not cleared correctly, the next call to Start()
- // would fail with AUDCLNT_E_BUFFER_ERROR at IAudioRenderClient::GetBuffer().
- // This check is only needed for shared-mode streams.
- if (share_mode_ == AUDCLNT_SHAREMODE_SHARED) {
- UINT32 num_queued_frames = 0;
- audio_client_->GetCurrentPadding(&num_queued_frames);
- DCHECK_EQ(0u, num_queued_frames);
- }
-}
-
-void WASAPIAudioOutputStream::Close() {
- DCHECK_EQ(GetCurrentThreadId(), creating_thread_id_);
-
- // It is valid to call Close() before calling Open() or Start().
- // It is also valid to call Close() after Start() has been called.
- Stop();
-
- // Inform the audio manager that we have been closed. This will cause our
- // destruction.
- manager_->ReleaseOutputStream(this);
-}
-
-void WASAPIAudioOutputStream::SetVolume(double volume) {
- DVLOG(1) << "SetVolume(volume=" << volume << ")";
- float volume_float = static_cast<float>(volume);
- if (volume_float < 0.0f || volume_float > 1.0f) {
- return;
- }
- volume_ = volume_float;
-}
-
-void WASAPIAudioOutputStream::GetVolume(double* volume) {
- DVLOG(1) << "GetVolume()";
- *volume = static_cast<double>(volume_);
-}
-
-// static
-int WASAPIAudioOutputStream::HardwareChannelCount() {
- // Use a WAVEFORMATEXTENSIBLE structure since it can specify both the
- // number of channels and the mapping of channels to speakers for
- // multichannel devices.
- base::win::ScopedCoMem<WAVEFORMATPCMEX> format_ex;
- HRESULT hr = GetMixFormat(
- eConsole, reinterpret_cast<WAVEFORMATEX**>(&format_ex));
- if (FAILED(hr))
- return 0;
-
- // Number of channels in the stream. Corresponds to the number of bits
- // set in the dwChannelMask.
- DVLOG(1) << "endpoint channels (out): " << format_ex->Format.nChannels;
-
- return static_cast<int>(format_ex->Format.nChannels);
-}
-
-// static
-ChannelLayout WASAPIAudioOutputStream::HardwareChannelLayout() {
- return ChannelConfigToChannelLayout(GetChannelConfig());
-}
-
-// static
-int WASAPIAudioOutputStream::HardwareSampleRate(ERole device_role) {
- base::win::ScopedCoMem<WAVEFORMATEX> format;
- HRESULT hr = GetMixFormat(device_role, &format);
- if (FAILED(hr))
- return 0;
-
- DVLOG(2) << "nSamplesPerSec: " << format->nSamplesPerSec;
- return static_cast<int>(format->nSamplesPerSec);
-}
-
-void WASAPIAudioOutputStream::Run() {
- ScopedCOMInitializer com_init(ScopedCOMInitializer::kMTA);
-
- // Increase the thread priority.
- render_thread_->SetThreadPriority(base::kThreadPriority_RealtimeAudio);
-
- // Enable MMCSS to ensure that this thread receives prioritized access to
- // CPU resources.
- DWORD task_index = 0;
- HANDLE mm_task = avrt::AvSetMmThreadCharacteristics(L"Pro Audio",
- &task_index);
- bool mmcss_is_ok =
- (mm_task && avrt::AvSetMmThreadPriority(mm_task, AVRT_PRIORITY_CRITICAL));
- if (!mmcss_is_ok) {
- // Failed to enable MMCSS on this thread. It is not fatal but can lead
- // to reduced QoS at high load.
- DWORD err = GetLastError();
- LOG(WARNING) << "Failed to enable MMCSS (error code=" << err << ").";
- }
-
- HRESULT hr = S_FALSE;
-
- bool playing = true;
- bool error = false;
- HANDLE wait_array[] = { stop_render_event_,
- audio_samples_render_event_ };
- UINT64 device_frequency = 0;
-
- // The IAudioClock interface enables us to monitor a stream's data
- // rate and the current position in the stream. Allocate it before we
- // start spinning.
- ScopedComPtr<IAudioClock> audio_clock;
- hr = audio_client_->GetService(__uuidof(IAudioClock),
- audio_clock.ReceiveVoid());
- if (SUCCEEDED(hr)) {
- // The device frequency is the frequency generated by the hardware clock in
- // the audio device. The GetFrequency() method reports a constant frequency.
- hr = audio_clock->GetFrequency(&device_frequency);
- }
- error = FAILED(hr);
- PLOG_IF(ERROR, error) << "Failed to acquire IAudioClock interface: "
- << std::hex << hr;
-
- // Keep rendering audio until the stop event or the stream-switch event
- // is signaled. An error event can also break the main thread loop.
- while (playing && !error) {
- // Wait for a close-down event, stream-switch event or a new render event.
- DWORD wait_result = WaitForMultipleObjects(arraysize(wait_array),
- wait_array,
- FALSE,
- INFINITE);
-
- switch (wait_result) {
- case WAIT_OBJECT_0 + 0:
- // |stop_render_event_| has been set.
- playing = false;
- break;
- case WAIT_OBJECT_0 + 1:
- {
- // |audio_samples_render_event_| has been set.
- UINT32 num_queued_frames = 0;
- uint8* audio_data = NULL;
-
- // Contains how much new data we can write to the buffer without
- // the risk of overwriting previously written data that the audio
- // engine has not yet read from the buffer.
- size_t num_available_frames = 0;
-
- if (share_mode_ == AUDCLNT_SHAREMODE_SHARED) {
- // Get the padding value which represents the amount of rendering
- // data that is queued up to play in the endpoint buffer.
- hr = audio_client_->GetCurrentPadding(&num_queued_frames);
- num_available_frames =
- endpoint_buffer_size_frames_ - num_queued_frames;
- } else {
- // While the stream is running, the system alternately sends one
- // buffer or the other to the client. This form of double buffering
- // is referred to as "ping-ponging". Each time the client receives
- // a buffer from the system (triggers this event) the client must
- // process the entire buffer. Calls to the GetCurrentPadding method
- // are unnecessary because the packet size must always equal the
- // buffer size. In contrast to the shared mode buffering scheme,
- // the latency for an event-driven, exclusive-mode stream depends
- // directly on the buffer size.
- num_available_frames = endpoint_buffer_size_frames_;
- }
-
- // Check if there is enough available space to fit the packet size
- // specified by the client.
- if (FAILED(hr) || (num_available_frames < packet_size_frames_))
- continue;
-
- // Derive the number of packets we need to get from the client to
- // fill up the available area in the endpoint buffer.
- // |num_packets| will always be one for exclusive-mode streams.
- size_t num_packets = (num_available_frames / packet_size_frames_);
-
- // Get data from the client/source.
- for (size_t n = 0; n < num_packets; ++n) {
- // Grab all available space in the rendering endpoint buffer
- // into which the client can write a data packet.
- hr = audio_render_client_->GetBuffer(packet_size_frames_,
- &audio_data);
- if (FAILED(hr)) {
- DLOG(ERROR) << "Failed to use rendering audio buffer: "
- << std::hex << hr;
- continue;
- }
-
- // Derive the audio delay which corresponds to the delay between
- // a render event and the time when the first audio sample in a
- // packet is played out through the speaker. This delay value
- // can typically be utilized by an acoustic echo-control (AEC)
- // unit at the render side.
- UINT64 position = 0;
- int audio_delay_bytes = 0;
- hr = audio_clock->GetPosition(&position, NULL);
- if (SUCCEEDED(hr)) {
- // Stream position of the sample that is currently playing
- // through the speaker.
- double pos_sample_playing_frames = format_.Format.nSamplesPerSec *
- (static_cast<double>(position) / device_frequency);
-
- // Stream position of the last sample written to the endpoint
- // buffer. Note that, the packet we are about to receive in
- // the upcoming callback is also included.
- size_t pos_last_sample_written_frames =
- num_written_frames_ + packet_size_frames_;
-
- // Derive the actual delay value which will be fed to the
- // render client using the OnMoreData() callback.
- audio_delay_bytes = (pos_last_sample_written_frames -
- pos_sample_playing_frames) * frame_size_;
- }
-
- // Read a data packet from the registered client source and
- // deliver a delay estimate in the same callback to the client.
- // A time stamp is also stored in the AudioBuffersState. This
- // time stamp can be used at the client side to compensate for
- // the delay between the usage of the delay value and the time
- // of generation.
-
- uint32 num_filled_bytes = 0;
- const int bytes_per_sample = format_.Format.wBitsPerSample >> 3;
-
- int frames_filled = source_->OnMoreData(
- audio_bus_.get(), AudioBuffersState(0, audio_delay_bytes));
- num_filled_bytes = frames_filled * frame_size_;
- DCHECK_LE(num_filled_bytes, packet_size_bytes_);
- // Note: If this ever changes to output raw float the data must be
- // clipped and sanitized since it may come from an untrusted
- // source such as NaCl.
- audio_bus_->ToInterleaved(
- frames_filled, bytes_per_sample, audio_data);
-
- // Perform in-place, software-volume adjustments.
- media::AdjustVolume(audio_data,
- num_filled_bytes,
- audio_bus_->channels(),
- bytes_per_sample,
- volume_);
-
- // Zero out the part of the packet which has not been filled by
- // the client. Using silence is the least bad option in this
- // situation.
- if (num_filled_bytes < packet_size_bytes_) {
- memset(&audio_data[num_filled_bytes], 0,
- (packet_size_bytes_ - num_filled_bytes));
- }
-
- // Release the buffer space acquired in the GetBuffer() call.
- DWORD flags = 0;
- audio_render_client_->ReleaseBuffer(packet_size_frames_,
- flags);
-
- num_written_frames_ += packet_size_frames_;
- }
- }
- break;
- default:
- error = true;
- break;
- }
- }
-
- if (playing && error) {
- // Stop audio rendering since something has gone wrong in our main thread
- // loop. Note that, we are still in a "started" state, hence a Stop() call
- // is required to join the thread properly.
- audio_client_->Stop();
- PLOG(ERROR) << "WASAPI rendering failed.";
- }
-
- // Disable MMCSS.
- if (mm_task && !avrt::AvRevertMmThreadCharacteristics(mm_task)) {
- PLOG(WARNING) << "Failed to disable MMCSS";
- }
-}
-
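The delay fed to OnMoreData() above is the gap, in bytes, between the last frame that will have been written once the upcoming packet lands in the endpoint buffer and the frame currently playing through the speaker, as derived from the IAudioClock position. A standalone sketch of that arithmetic, with illustrative names that are not from the original file:

// Illustrative sketch, not part of the original file.
static int EstimateRenderDelayBytes(UINT64 device_position,
                                    UINT64 device_frequency,
                                    UINT64 frames_written,
                                    size_t frames_per_packet,
                                    int sample_rate,
                                    int frame_size_bytes) {
  // Frame index of the sample currently being played through the speaker.
  const double playing_frame =
      sample_rate * (static_cast<double>(device_position) / device_frequency);
  // Frame index of the last sample written once the upcoming packet has been
  // delivered to the endpoint buffer.
  const double last_written_frame =
      static_cast<double>(frames_written) + frames_per_packet;
  return static_cast<int>((last_written_frame - playing_frame) *
                          frame_size_bytes);
}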
-void WASAPIAudioOutputStream::HandleError(HRESULT err) {
- CHECK((started() && GetCurrentThreadId() == render_thread_->tid()) ||
- (!started() && GetCurrentThreadId() == creating_thread_id_));
- NOTREACHED() << "Error code: " << std::hex << err;
- if (source_)
- source_->OnError(this, static_cast<int>(err));
-}
-
-HRESULT WASAPIAudioOutputStream::SetRenderDevice() {
- ScopedComPtr<IMMDeviceEnumerator> device_enumerator;
- ScopedComPtr<IMMDevice> endpoint_device;
-
- // Create the IMMDeviceEnumerator interface.
- HRESULT hr = CoCreateInstance(__uuidof(MMDeviceEnumerator),
- NULL,
- CLSCTX_INPROC_SERVER,
- __uuidof(IMMDeviceEnumerator),
- device_enumerator.ReceiveVoid());
- if (SUCCEEDED(hr)) {
- // Retrieve the default render audio endpoint for the specified role.
- // Note that, in Windows Vista, the MMDevice API supports device roles
- // but the system-supplied user interface programs do not.
- hr = device_enumerator->GetDefaultAudioEndpoint(
- eRender, device_role_, endpoint_device.Receive());
- if (FAILED(hr))
- return hr;
-
- // Verify that the audio endpoint device is active. That is, the audio
- // adapter that connects to the endpoint device is present and enabled.
- DWORD state = DEVICE_STATE_DISABLED;
- hr = endpoint_device->GetState(&state);
- if (SUCCEEDED(hr)) {
- if (!(state & DEVICE_STATE_ACTIVE)) {
- DLOG(ERROR) << "Selected render device is not active.";
- hr = E_ACCESSDENIED;
- }
- }
- }
-
- if (SUCCEEDED(hr)) {
- device_enumerator_ = device_enumerator;
- endpoint_device_ = endpoint_device;
- }
-
- return hr;
-}
-
-HRESULT WASAPIAudioOutputStream::ActivateRenderDevice() {
- ScopedComPtr<IAudioClient> audio_client;
-
- // Creates and activates an IAudioClient COM object given the selected
- // render endpoint device.
- HRESULT hr = endpoint_device_->Activate(__uuidof(IAudioClient),
- CLSCTX_INPROC_SERVER,
- NULL,
- audio_client.ReceiveVoid());
- if (SUCCEEDED(hr)) {
- // Retrieve the stream format that the audio engine uses for its internal
- // processing/mixing of shared-mode streams.
- audio_engine_mix_format_.Reset(NULL);
- hr = audio_client->GetMixFormat(
- reinterpret_cast<WAVEFORMATEX**>(&audio_engine_mix_format_));
-
- if (SUCCEEDED(hr)) {
- audio_client_ = audio_client;
- }
- }
-
- return hr;
-}
-
-bool WASAPIAudioOutputStream::DesiredFormatIsSupported() {
- // Determine, before calling IAudioClient::Initialize(), whether the audio
- // engine supports a particular stream format.
- // In shared mode, the audio engine always supports the mix format,
- // which is stored in the |audio_engine_mix_format_| member, and it is also
- // possible to receive a proposed (closest) format if the current format is
- // not supported.
- base::win::ScopedCoMem<WAVEFORMATEXTENSIBLE> closest_match;
- HRESULT hr = audio_client_->IsFormatSupported(
- share_mode_, reinterpret_cast<WAVEFORMATEX*>(&format_),
- reinterpret_cast<WAVEFORMATEX**>(&closest_match));
-
- // This log can only be triggered for shared mode.
- DLOG_IF(ERROR, hr == S_FALSE) << "Format is not supported "
- << "but a closest match exists.";
- // This log can be triggered both for shared and exclusive modes.
- DLOG_IF(ERROR, hr == AUDCLNT_E_UNSUPPORTED_FORMAT) << "Unsupported format.";
- if (hr == S_FALSE) {
- DVLOG(1) << "wFormatTag : " << closest_match->Format.wFormatTag;
- DVLOG(1) << "nChannels : " << closest_match->Format.nChannels;
- DVLOG(1) << "nSamplesPerSec: " << closest_match->Format.nSamplesPerSec;
- DVLOG(1) << "wBitsPerSample: " << closest_match->Format.wBitsPerSample;
- }
-
- return (hr == S_OK);
-}
-
-HRESULT WASAPIAudioOutputStream::InitializeAudioEngine() {
-#if !defined(NDEBUG)
- // The period between processing passes by the audio engine is fixed for a
- // particular audio endpoint device and represents the smallest processing
- // quantum for the audio engine. This period plus the stream latency between
- // the buffer and endpoint device represents the minimum possible latency
- // that an audio application can achieve in shared mode.
- {
- REFERENCE_TIME default_device_period = 0;
- REFERENCE_TIME minimum_device_period = 0;
- HRESULT hr_dbg = audio_client_->GetDevicePeriod(&default_device_period,
- &minimum_device_period);
- if (SUCCEEDED(hr_dbg)) {
- // Shared mode device period.
- DVLOG(1) << "shared mode (default) device period: "
- << static_cast<double>(default_device_period / 10000.0)
- << " [ms]";
- // Exclusive mode device period.
- DVLOG(1) << "exclusive mode (minimum) device period: "
- << static_cast<double>(minimum_device_period / 10000.0)
- << " [ms]";
- }
-
- REFERENCE_TIME latency = 0;
- hr_dbg = audio_client_->GetStreamLatency(&latency);
- if (SUCCEEDED(hr_dbg)) {
- DVLOG(1) << "stream latency: " << static_cast<double>(latency / 10000.0)
- << " [ms]";
- }
- }
-#endif
-
- HRESULT hr = S_FALSE;
-
- // Perform different initialization depending on whether the device shall be
- // opened in shared mode or in exclusive mode.
- hr = (share_mode_ == AUDCLNT_SHAREMODE_SHARED) ?
- SharedModeInitialization() : ExclusiveModeInitialization();
- if (FAILED(hr)) {
- LOG(WARNING) << "IAudioClient::Initialize() failed: " << std::hex << hr;
- return hr;
- }
-
- // Retrieve the length of the endpoint buffer. The buffer length represents
- // the maximum amount of rendering data that the client can write to
- // the endpoint buffer during a single processing pass.
- // A typical value is 960 audio frames <=> 20ms @ 48kHz sample rate.
- hr = audio_client_->GetBufferSize(&endpoint_buffer_size_frames_);
- if (FAILED(hr))
- return hr;
- DVLOG(1) << "endpoint buffer size: " << endpoint_buffer_size_frames_
- << " [frames]";
-
- // The buffer scheme for exclusive mode streams is not designed for max
- // flexibility. We only allow a "perfect match" between the packet size set
- // by the user and the actual endpoint buffer size.
- if (share_mode_ == AUDCLNT_SHAREMODE_EXCLUSIVE &&
- endpoint_buffer_size_frames_ != packet_size_frames_) {
- hr = AUDCLNT_E_INVALID_SIZE;
- DLOG(ERROR) << "AUDCLNT_E_INVALID_SIZE";
- return hr;
- }
-
- // Set the event handle that the audio engine will signal each time
- // a buffer becomes ready to be processed by the client.
- hr = audio_client_->SetEventHandle(audio_samples_render_event_.Get());
- if (FAILED(hr))
- return hr;
-
- // Get access to the IAudioRenderClient interface. This interface
- // enables us to write output data to a rendering endpoint buffer.
- // The methods in this interface manage the movement of data packets
- // that contain audio-rendering data.
- hr = audio_client_->GetService(__uuidof(IAudioRenderClient),
- audio_render_client_.ReceiveVoid());
- return hr;
-}
-
-HRESULT WASAPIAudioOutputStream::SharedModeInitialization() {
- DCHECK_EQ(share_mode_, AUDCLNT_SHAREMODE_SHARED);
-
- // TODO(henrika): this buffer scheme is still under development.
- // The exact details are yet to be determined based on tests with different
- // audio clients.
- int glitch_free_buffer_size_ms = static_cast<int>(packet_size_ms_ + 0.5);
- if (audio_engine_mix_format_->Format.nSamplesPerSec % 8000 == 0) {
- // Initial tests have shown that we have to add 10 ms extra to
- // ensure that we don't run empty for any packet size.
- glitch_free_buffer_size_ms += 10;
- } else if (audio_engine_mix_format_->Format.nSamplesPerSec % 11025 == 0) {
- // Initial tests have shown that we have to add 20 ms extra to
- // ensure that we don't run empty for any packet size.
- glitch_free_buffer_size_ms += 20;
- } else {
- DLOG(WARNING) << "Unsupported sample rate "
- << audio_engine_mix_format_->Format.nSamplesPerSec << " detected";
- glitch_free_buffer_size_ms += 20;
- }
- DVLOG(1) << "glitch_free_buffer_size_ms: " << glitch_free_buffer_size_ms;
- REFERENCE_TIME requested_buffer_duration =
- static_cast<REFERENCE_TIME>(glitch_free_buffer_size_ms * 10000);
-
- // Initialize the audio stream between the client and the device.
- // We connect indirectly through the audio engine by using shared mode
- // and WASAPI is initialized in an event driven mode.
- // Note that this API ensures that the buffer is never smaller than the
- // minimum buffer size needed to ensure glitch-free rendering.
- // If we request a buffer size that is smaller than the audio engine's
- // minimum required buffer size, the method sets the buffer size to this
- // minimum buffer size rather than to the buffer size requested.
- HRESULT hr = S_FALSE;
- hr = audio_client_->Initialize(AUDCLNT_SHAREMODE_SHARED,
- AUDCLNT_STREAMFLAGS_EVENTCALLBACK |
- AUDCLNT_STREAMFLAGS_NOPERSIST,
- requested_buffer_duration,
- 0,
- reinterpret_cast<WAVEFORMATEX*>(&format_),
- NULL);
- return hr;
-}
-
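SharedModeInitialization() sizes the requested endpoint buffer by rounding the packet size to whole milliseconds, adding 10 ms of headroom for mix rates in the 8 kHz family and 20 ms otherwise, and converting the result into REFERENCE_TIME (100-nanosecond) units. A small sketch of that heuristic with no WASAPI types, values assumed for illustration only:

#include <cstdint>
#include <cstdio>

// Mirror of the glitch-free buffer heuristic: round the packet size to whole
// milliseconds, add 10 ms of headroom for 8 kHz-family rates and 20 ms for
// everything else, then convert to 100-nanosecond REFERENCE_TIME units.
int64_t RequestedBufferDuration(double packet_size_ms, int sample_rate) {
  int buffer_ms = static_cast<int>(packet_size_ms + 0.5);
  buffer_ms += (sample_rate % 8000 == 0) ? 10 : 20;
  return static_cast<int64_t>(buffer_ms) * 10000;
}

int main() {
  // 10 ms packets at 48 kHz: 10 + 10 = 20 ms => 200000 REFERENCE_TIME units.
  std::printf("%lld\n",
              static_cast<long long>(RequestedBufferDuration(10.0, 48000)));
  return 0;
}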
-HRESULT WASAPIAudioOutputStream::ExclusiveModeInitialization() {
- DCHECK_EQ(share_mode_, AUDCLNT_SHAREMODE_EXCLUSIVE);
-
- float f = (1000.0 * packet_size_frames_) / format_.Format.nSamplesPerSec;
- REFERENCE_TIME requested_buffer_duration =
- static_cast<REFERENCE_TIME>(f * 10000.0 + 0.5);
-
- // Initialize the audio stream between the client and the device.
- // For an exclusive-mode stream that uses event-driven buffering, the
- // caller must specify nonzero values for hnsPeriodicity and
- // hnsBufferDuration, and the values of these two parameters must be equal.
- // The Initialize method allocates two buffers for the stream. Each buffer
- // is equal in duration to the value of the hnsBufferDuration parameter.
- // Following the Initialize call for a rendering stream, the caller should
- // fill the first of the two buffers before starting the stream.
- HRESULT hr = S_FALSE;
- hr = audio_client_->Initialize(AUDCLNT_SHAREMODE_EXCLUSIVE,
- AUDCLNT_STREAMFLAGS_EVENTCALLBACK |
- AUDCLNT_STREAMFLAGS_NOPERSIST,
- requested_buffer_duration,
- requested_buffer_duration,
- reinterpret_cast<WAVEFORMATEX*>(&format_),
- NULL);
- if (FAILED(hr)) {
- if (hr == AUDCLNT_E_BUFFER_SIZE_NOT_ALIGNED) {
- LOG(ERROR) << "AUDCLNT_E_BUFFER_SIZE_NOT_ALIGNED";
-
- UINT32 aligned_buffer_size = 0;
- audio_client_->GetBufferSize(&aligned_buffer_size);
- DVLOG(1) << "Use aligned buffer size instead: " << aligned_buffer_size;
- audio_client_.Release();
-
- // Calculate new aligned periodicity. Each unit of reference time
- // is 100 nanoseconds.
- REFERENCE_TIME aligned_buffer_duration = static_cast<REFERENCE_TIME>(
- (10000000.0 * aligned_buffer_size / format_.Format.nSamplesPerSec)
- + 0.5);
-
- // It is possible to re-activate and re-initialize the audio client
- // at this stage but we bail out with an error code instead and
- // combine it with a log message which informs about the suggested
- // aligned buffer size which should be used instead.
- DVLOG(1) << "aligned_buffer_duration: "
- << static_cast<double>(aligned_buffer_duration / 10000.0)
- << " [ms]";
- } else if (hr == AUDCLNT_E_INVALID_DEVICE_PERIOD) {
- // We will get this error if we try to use a smaller buffer size than
- // the minimum supported size (usually ~3ms on Windows 7).
- LOG(ERROR) << "AUDCLNT_E_INVALID_DEVICE_PERIOD";
- }
- }
-
- return hr;
-}
-
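When the exclusive-mode Initialize() call above fails with AUDCLNT_E_BUFFER_SIZE_NOT_ALIGNED, the aligned frame count reported by GetBufferSize() is converted back into a periodicity in 100-nanosecond units for the log message. A sketch of just that conversion, with illustrative inputs:

#include <cstdint>
#include <cstdio>

// Frames -> REFERENCE_TIME (100-nanosecond units), rounded to nearest.
int64_t AlignedBufferDuration(uint32_t aligned_buffer_frames, int sample_rate) {
  return static_cast<int64_t>(
      10000000.0 * aligned_buffer_frames / sample_rate + 0.5);
}

int main() {
  // 160 frames at 48 kHz is ~3.3333 ms, i.e. 33333 units.
  std::printf("%lld\n",
              static_cast<long long>(AlignedBufferDuration(160, 48000)));
  return 0;
}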
-std::string WASAPIAudioOutputStream::GetDeviceName(LPCWSTR device_id) const {
- std::string name;
- ScopedComPtr<IMMDevice> audio_device;
-
- // Get the IMMDevice interface corresponding to the given endpoint ID string.
- HRESULT hr = device_enumerator_->GetDevice(device_id, audio_device.Receive());
- if (SUCCEEDED(hr)) {
- // Retrieve user-friendly name of endpoint device.
- // Example: "Speakers (Realtek High Definition Audio)".
- ScopedComPtr<IPropertyStore> properties;
- hr = audio_device->OpenPropertyStore(STGM_READ, properties.Receive());
- if (SUCCEEDED(hr)) {
- PROPVARIANT friendly_name;
- PropVariantInit(&friendly_name);
- hr = properties->GetValue(PKEY_Device_FriendlyName, &friendly_name);
- if (SUCCEEDED(hr) && friendly_name.vt == VT_LPWSTR) {
- if (friendly_name.pwszVal)
- name = WideToUTF8(friendly_name.pwszVal);
- }
- PropVariantClear(&friendly_name);
- }
- }
- return name;
-}
-
-} // namespace media
diff --git a/src/media/audio/win/audio_low_latency_output_win.h b/src/media/audio/win/audio_low_latency_output_win.h
deleted file mode 100644
index fb9aa3d..0000000
--- a/src/media/audio/win/audio_low_latency_output_win.h
+++ /dev/null
@@ -1,299 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// Implementation of AudioOutputStream for Windows using Windows Core Audio
-// WASAPI for low latency rendering.
-//
-// Overview of operation and performance:
-//
-// - An object of WASAPIAudioOutputStream is created by the AudioManager
-// factory.
-// - Next some thread will call Open(), at that point the underlying
-// Core Audio APIs are utilized to create two WASAPI interfaces called
-// IAudioClient and IAudioRenderClient.
-// - Then some thread will call Start(source).
-// A thread called "wasapi_render_thread" is started and this thread listens
-// on an event signal which is set periodically by the audio engine to signal
-// render events. As a result, OnMoreData() will be called and the registered
-// client is then expected to provide data samples to be played out.
-// - At some point, a thread will call Stop(), which stops and joins the
-// render thread and at the same time stops audio streaming.
-// - The same thread that called stop will call Close() where we cleanup
-// and notify the audio manager, which likely will destroy this object.
- // - Initial tests on Windows 7 show that this implementation results in a
-// latency of approximately 35 ms if the selected packet size is less than
-// or equal to 20 ms. Using a packet size of 10 ms does not result in a
-// lower latency but only affects the size of the data buffer in each
-// OnMoreData() callback.
-// - A total typical delay of 35 ms contains three parts:
-// o Audio endpoint device period (~10 ms).
-// o Stream latency between the buffer and endpoint device (~5 ms).
-// o Endpoint buffer (~20 ms to ensure glitch-free rendering).
-// - Note that, if the user selects a packet size of e.g. 100 ms, the total
-// delay will be approximately 115 ms (10 + 5 + 100).
-//
-// Implementation notes:
-//
-// - The minimum supported client is Windows Vista.
-// - This implementation is single-threaded, hence:
-// o Construction and destruction must take place from the same thread.
-// o All APIs must be called from the creating thread as well.
-// - It is recommended to first acquire the native sample rate of the default
-// input device and then use the same rate when creating this object. Use
-// WASAPIAudioOutputStream::HardwareSampleRate() to retrieve the sample rate.
-// - Calling Close() also leads to self destruction.
-// - Stream switching is not supported if the user shifts the audio device
-// after Open() is called but before Start() has been called.
-// - Stream switching can fail if streaming starts on one device with a
-// supported format (X) and the new default device - to which we would like
-// to switch - uses another format (Y), which is not supported given the
-// configured audio parameters.
-// - The audio device must be opened with the same number of channels as it
-// supports natively (see HardwareChannelCount()) otherwise Open() will fail.
-// - Support for 8-bit audio has not yet been verified and tested.
-//
-// Core Audio API details:
-//
-// - The public API methods (Open(), Start(), Stop() and Close()) must be
-// called on constructing thread. The reason is that we want to ensure that
-// the COM environment is the same for all API implementations.
-// - Utilized MMDevice interfaces:
-// o IMMDeviceEnumerator
-// o IMMDevice
-// - Utilized WASAPI interfaces:
-// o IAudioClient
-// o IAudioRenderClient
-// - The stream is initialized in shared mode and the processing of the
-// audio buffer is event driven.
-// - The Multimedia Class Scheduler service (MMCSS) is utilized to boost
-// the priority of the render thread.
-// - Audio-rendering endpoint devices can have three roles:
-// Console (eConsole), Communications (eCommunications), and Multimedia
-// (eMultimedia). Search for "Device Roles" on MSDN for more details.
-//
-// Threading details:
-//
-// - It is assumed that this class is created on the audio thread owned
-// by the AudioManager.
-// - It is a requirement to call the following methods on the same audio
-// thread: Open(), Start(), Stop(), and Close().
-// - Audio rendering is performed on the audio render thread, owned by this
-// class, and the AudioSourceCallback::OnMoreData() method will be called
-// from this thread. Stream switching also takes place on the audio-render
-// thread.
-//
-// Experimental exclusive mode:
-//
-// - It is possible to open up a stream in exclusive mode by using the
-// --enable-exclusive-audio command line flag.
-// - The internal buffering scheme is less flexible for exclusive streams.
-// Hence, some manual tuning will be required before deciding what frame
-// size to use. See the WinAudioOutputTest unit test for more details.
-// - If an application opens a stream in exclusive mode, the application has
-// exclusive use of the audio endpoint device that plays the stream.
-// - Exclusive-mode should only be utilized when the lowest possible latency
-// is important.
-// - In exclusive mode, the client can choose to open the stream in any audio
-// format that the endpoint device supports, i.e. not limited to the device's
-// current (default) configuration.
-// - Initial measurements on Windows 7 (HP Z600 workstation) have shown that
-// the lowest possible latencies we can achieve on this machine are:
-// o ~3.3333ms @ 48kHz <=> 160 audio frames per buffer.
-// o ~3.6281ms @ 44.1kHz <=> 160 audio frames per buffer.
-// - See http://msdn.microsoft.com/en-us/library/windows/desktop/dd370844(v=vs.85).aspx
-// for more details.
-
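As a quick check on the latency budget sketched in the overview (device period + stream latency + endpoint buffer), the following toy calculation reproduces the quoted 35 ms and 115 ms figures; the constant terms are the approximate values from the comment, not measured numbers:

#include <cstdio>

// Approximate shared-mode output latency in ms: device period plus stream
// latency plus the endpoint buffer, which tracks the selected packet size
// (all values are the rough figures quoted in the overview comment).
double ApproxTotalDelayMs(double packet_size_ms) {
  const double device_period_ms = 10.0;
  const double stream_latency_ms = 5.0;
  const double endpoint_buffer_ms =
      packet_size_ms <= 20.0 ? 20.0 : packet_size_ms;
  return device_period_ms + stream_latency_ms + endpoint_buffer_ms;
}

int main() {
  std::printf("20 ms packets : ~%.0f ms total\n", ApproxTotalDelayMs(20.0));   // ~35
  std::printf("100 ms packets: ~%.0f ms total\n", ApproxTotalDelayMs(100.0));  // ~115
  return 0;
}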
-#ifndef MEDIA_AUDIO_WIN_AUDIO_LOW_LATENCY_OUTPUT_WIN_H_
-#define MEDIA_AUDIO_WIN_AUDIO_LOW_LATENCY_OUTPUT_WIN_H_
-
-#include <Audioclient.h>
-#include <MMDeviceAPI.h>
-
-#include <string>
-
-#include "base/compiler_specific.h"
-#include "base/memory/scoped_ptr.h"
-#include "base/threading/platform_thread.h"
-#include "base/threading/simple_thread.h"
-#include "base/win/scoped_co_mem.h"
-#include "base/win/scoped_com_initializer.h"
-#include "base/win/scoped_comptr.h"
-#include "base/win/scoped_handle.h"
-#include "media/audio/audio_io.h"
-#include "media/audio/audio_parameters.h"
-#include "media/base/media_export.h"
-
-namespace media {
-
-class AudioManagerWin;
-
-// AudioOutputStream implementation using Windows Core Audio APIs.
-class MEDIA_EXPORT WASAPIAudioOutputStream :
- public AudioOutputStream,
- public base::DelegateSimpleThread::Delegate {
- public:
- // The ctor takes all the usual parameters, plus |manager| which is the
- // audio manager that is creating this object.
- WASAPIAudioOutputStream(AudioManagerWin* manager,
- const AudioParameters& params,
- ERole device_role);
-
- // The dtor is typically called by the AudioManager only and it is usually
- // triggered by calling AudioOutputStream::Close().
- virtual ~WASAPIAudioOutputStream();
-
- // Implementation of AudioOutputStream.
- virtual bool Open() OVERRIDE;
- virtual void Start(AudioSourceCallback* callback) OVERRIDE;
- virtual void Stop() OVERRIDE;
- virtual void Close() OVERRIDE;
- virtual void SetVolume(double volume) OVERRIDE;
- virtual void GetVolume(double* volume) OVERRIDE;
-
- // Retrieves the number of channels the audio engine uses for its internal
- // processing/mixing of shared-mode streams for the default endpoint device.
- static int HardwareChannelCount();
-
- // Retrieves the channel layout the audio engine uses for its internal
- // processing/mixing of shared-mode streams for the default endpoint device.
- // Note that we convert an internal channel layout mask (see ChannelMask())
- // into a Chrome-specific channel layout enumerator in this method, hence
- // the match might not be perfect.
- static ChannelLayout HardwareChannelLayout();
-
- // Retrieves the sample rate the audio engine uses for its internal
- // processing/mixing of shared-mode streams for the default endpoint device.
- static int HardwareSampleRate(ERole device_role);
-
- // Returns AUDCLNT_SHAREMODE_EXCLUSIVE if --enable-exclusive-audio is used
- // as command-line flag and AUDCLNT_SHAREMODE_SHARED otherwise (default).
- static AUDCLNT_SHAREMODE GetShareMode();
-
- bool started() const { return render_thread_.get() != NULL; }
-
- // Returns the number of channels the audio engine uses for its internal
- // processing/mixing of shared-mode streams for the default endpoint device.
- int GetEndpointChannelCountForTesting() { return format_.Format.nChannels; }
-
- private:
- // DelegateSimpleThread::Delegate implementation.
- virtual void Run() OVERRIDE;
-
- // Issues the OnError() callback to the |source_|.
- void HandleError(HRESULT err);
-
- // The Open() method is divided into these sub methods.
- HRESULT SetRenderDevice();
- HRESULT ActivateRenderDevice();
- bool DesiredFormatIsSupported();
- HRESULT InitializeAudioEngine();
-
- // Called when the device will be opened in shared mode and will use the
- // internal audio engine's mix format.
- HRESULT SharedModeInitialization();
-
- // Called when the device will be opened in exclusive mode and will use the
- // application-specified format.
- HRESULT ExclusiveModeInitialization();
-
- // Converts unique endpoint ID to user-friendly device name.
- std::string GetDeviceName(LPCWSTR device_id) const;
-
- // Contains the thread ID of the creating thread.
- base::PlatformThreadId creating_thread_id_;
-
- // Our creator, the audio manager, needs to be notified when we close.
- AudioManagerWin* manager_;
-
- // Rendering is driven by this thread (which has no message loop).
- // All OnMoreData() callbacks will be called from this thread.
- scoped_ptr<base::DelegateSimpleThread> render_thread_;
-
- // Contains the desired audio format which is set up at construction.
- // Extended PCM waveform format structure based on WAVEFORMATEXTENSIBLE.
- // Use this for multiple channel and hi-resolution PCM data.
- WAVEFORMATPCMEX format_;
-
- // Copy of the audio format which we know the audio engine supports.
- // It is recommended to ensure that the sample rate in |format_| is identical
- // to the sample rate in |audio_engine_mix_format_|.
- base::win::ScopedCoMem<WAVEFORMATPCMEX> audio_engine_mix_format_;
-
- bool opened_;
-
- // Set to true as soon as a new default device is detected, and cleared when
- // the streaming has switched from using the old device to the new device.
- // All additional device detections during an active state are ignored to
- // ensure that the ongoing switch can finalize without disruptions.
- bool restart_rendering_mode_;
-
- // Volume level from 0 to 1.
- float volume_;
-
- // Size in bytes of each audio frame (4 bytes for 16-bit stereo PCM).
- size_t frame_size_;
-
- // Size in audio frames of each audio packet where an audio packet
- // is defined as the block of data which the source is expected to deliver
- // in each OnMoreData() callback.
- size_t packet_size_frames_;
-
- // Size in bytes of each audio packet.
- size_t packet_size_bytes_;
-
- // Size in milliseconds of each audio packet.
- float packet_size_ms_;
-
- // Length of the audio endpoint buffer.
- size_t endpoint_buffer_size_frames_;
-
- // Defines the role that the system has assigned to an audio endpoint device.
- ERole device_role_;
-
- // The sharing mode for the connection.
- // Valid values are AUDCLNT_SHAREMODE_SHARED and AUDCLNT_SHAREMODE_EXCLUSIVE
- // where AUDCLNT_SHAREMODE_SHARED is the default.
- AUDCLNT_SHAREMODE share_mode_;
-
- // The channel count set by the client in |params| which is provided to the
- // constructor. The client must feed the AudioSourceCallback::OnMoreData()
- // callback with PCM-data that contains this number of channels.
- int client_channel_count_;
-
- // Counts the number of audio frames written to the endpoint buffer.
- UINT64 num_written_frames_;
-
- // Pointer to the client that will deliver audio samples to be played out.
- AudioSourceCallback* source_;
-
- // An IMMDeviceEnumerator interface which represents a device enumerator.
- base::win::ScopedComPtr<IMMDeviceEnumerator> device_enumerator_;
-
- // An IMMDevice interface which represents an audio endpoint device.
- base::win::ScopedComPtr<IMMDevice> endpoint_device_;
-
- // An IAudioClient interface which enables a client to create and initialize
- // an audio stream between an audio application and the audio engine.
- base::win::ScopedComPtr<IAudioClient> audio_client_;
-
- // The IAudioRenderClient interface enables a client to write output
- // data to a rendering endpoint buffer.
- base::win::ScopedComPtr<IAudioRenderClient> audio_render_client_;
-
- // The audio engine will signal this event each time a buffer becomes
- // ready to be filled by the client.
- base::win::ScopedHandle audio_samples_render_event_;
-
- // This event will be signaled when rendering shall stop.
- base::win::ScopedHandle stop_render_event_;
-
- // Container for retrieving data from AudioSourceCallback::OnMoreData().
- scoped_ptr<AudioBus> audio_bus_;
-
- DISALLOW_COPY_AND_ASSIGN(WASAPIAudioOutputStream);
-};
-
-} // namespace media
-
-#endif // MEDIA_AUDIO_WIN_AUDIO_LOW_LATENCY_OUTPUT_WIN_H_
diff --git a/src/media/audio/win/audio_low_latency_output_win_unittest.cc b/src/media/audio/win/audio_low_latency_output_win_unittest.cc
deleted file mode 100644
index 9836c09..0000000
--- a/src/media/audio/win/audio_low_latency_output_win_unittest.cc
+++ /dev/null
@@ -1,763 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include <windows.h>
-#include <mmsystem.h>
-
-#include "base/basictypes.h"
-#include "base/environment.h"
-#include "base/file_util.h"
-#include "base/memory/scoped_ptr.h"
-#include "base/message_loop.h"
-#include "base/test/test_timeouts.h"
-#include "base/time.h"
-#include "base/path_service.h"
-#include "base/win/scoped_com_initializer.h"
-#include "media/audio/audio_io.h"
-#include "media/audio/audio_manager.h"
-#include "media/audio/audio_util.h"
-#include "media/audio/win/audio_low_latency_output_win.h"
-#include "media/audio/win/core_audio_util_win.h"
-#include "media/base/decoder_buffer.h"
-#include "media/base/seekable_buffer.h"
-#include "media/base/test_data_util.h"
-#include "testing/gmock_mutant.h"
-#include "testing/gmock/include/gmock/gmock.h"
-#include "testing/gtest/include/gtest/gtest.h"
-
-using ::testing::_;
-using ::testing::AnyNumber;
-using ::testing::Between;
-using ::testing::CreateFunctor;
-using ::testing::DoAll;
-using ::testing::Gt;
-using ::testing::InvokeWithoutArgs;
-using ::testing::NotNull;
-using ::testing::Return;
-using base::win::ScopedCOMInitializer;
-
-namespace media {
-
-static const char kSpeechFile_16b_s_48k[] = "speech_16b_stereo_48kHz.raw";
-static const char kSpeechFile_16b_s_44k[] = "speech_16b_stereo_44kHz.raw";
-static const size_t kFileDurationMs = 20000;
-static const size_t kNumFileSegments = 2;
-static const int kBitsPerSample = 16;
-static const ChannelLayout kChannelLayout = CHANNEL_LAYOUT_STEREO;
-static const size_t kMaxDeltaSamples = 1000;
-static const char* kDeltaTimeMsFileName = "delta_times_ms.txt";
-
-MATCHER_P(HasValidDelay, value, "") {
- // It is difficult to come up with a perfect test condition for the delay
- // estimation. For now, verify that the produced output delay is always
- // larger than the selected buffer size.
- return arg.hardware_delay_bytes > value.hardware_delay_bytes;
-}
-
-// Used to terminate a loop from a different thread than the loop belongs to.
-// |loop| should be a MessageLoopProxy.
-ACTION_P(QuitLoop, loop) {
- loop->PostTask(FROM_HERE, MessageLoop::QuitClosure());
-}
-
-class MockAudioSourceCallback : public AudioOutputStream::AudioSourceCallback {
- public:
- MOCK_METHOD2(OnMoreData, int(AudioBus* audio_bus,
- AudioBuffersState buffers_state));
- MOCK_METHOD3(OnMoreIOData, int(AudioBus* source,
- AudioBus* dest,
- AudioBuffersState buffers_state));
- MOCK_METHOD2(OnError, void(AudioOutputStream* stream, int code));
-};
-
-// This audio source implementation should be used for manual tests only since
-// it takes about 20 seconds to play out a file.
-class ReadFromFileAudioSource : public AudioOutputStream::AudioSourceCallback {
- public:
- explicit ReadFromFileAudioSource(const std::string& name)
- : pos_(0),
- previous_call_time_(base::Time::Now()),
- text_file_(NULL),
- elements_to_write_(0) {
- // Reads a test file from media/test/data directory.
- file_ = ReadTestDataFile(name);
-
- // Creates an array that will store delta times between callbacks.
- // The content of this array will be written to a text file at
- // destruction and can then be used for off-line analysis of the exact
- // timing of callbacks. The text file will be stored in media/test/data.
- delta_times_.reset(new int[kMaxDeltaSamples]);
- }
-
- virtual ~ReadFromFileAudioSource() {
- // Get complete file path to output file in directory containing
- // media_unittests.exe.
- FilePath file_name;
- EXPECT_TRUE(PathService::Get(base::DIR_EXE, &file_name));
- file_name = file_name.AppendASCII(kDeltaTimeMsFileName);
-
- EXPECT_TRUE(!text_file_);
- text_file_ = file_util::OpenFile(file_name, "wt");
- DLOG_IF(ERROR, !text_file_) << "Failed to open log file.";
-
- // Write the array which contains delta times to a text file.
- size_t elements_written = 0;
- while (elements_written < elements_to_write_) {
- fprintf(text_file_, "%d\n", delta_times_[elements_written]);
- ++elements_written;
- }
-
- file_util::CloseFile(text_file_);
- }
-
- // AudioOutputStream::AudioSourceCallback implementation.
- virtual int OnMoreData(AudioBus* audio_bus,
- AudioBuffersState buffers_state) {
- // Store time difference between two successive callbacks in an array.
- // These values will be written to a file in the destructor.
- int diff = (base::Time::Now() - previous_call_time_).InMilliseconds();
- previous_call_time_ = base::Time::Now();
- if (elements_to_write_ < kMaxDeltaSamples) {
- delta_times_[elements_to_write_] = diff;
- ++elements_to_write_;
- }
-
- int max_size =
- audio_bus->frames() * audio_bus->channels() * kBitsPerSample / 8;
-
- // Use samples read from a data file and fill up the audio buffer
- // provided to us in the callback.
- if (pos_ + static_cast<int>(max_size) > file_size())
- max_size = file_size() - pos_;
- int frames = max_size / (audio_bus->channels() * kBitsPerSample / 8);
- if (max_size) {
- audio_bus->FromInterleaved(
- file_->GetData() + pos_, frames, kBitsPerSample / 8);
- pos_ += max_size;
- }
- return frames;
- }
-
- virtual int OnMoreIOData(AudioBus* source,
- AudioBus* dest,
- AudioBuffersState buffers_state) OVERRIDE {
- NOTREACHED();
- return 0;
- }
-
- virtual void OnError(AudioOutputStream* stream, int code) {}
-
- int file_size() { return file_->GetDataSize(); }
-
- private:
- scoped_refptr<DecoderBuffer> file_;
- scoped_array<int> delta_times_;
- int pos_;
- base::Time previous_call_time_;
- FILE* text_file_;
- size_t elements_to_write_;
-};
-
-static bool ExclusiveModeIsEnabled() {
- return (WASAPIAudioOutputStream::GetShareMode() ==
- AUDCLNT_SHAREMODE_EXCLUSIVE);
-}
-
-// Convenience method which ensures that we are not running on the build
-// bots and that at least one valid output device can be found. We also
-// verify that we are not running on XP since the low-latency (WASAPI-
-// based) version requires Windows Vista or higher.
-static bool CanRunAudioTests(AudioManager* audio_man) {
- if (!CoreAudioUtil::IsSupported()) {
- LOG(WARNING) << "This test requires Windows Vista or higher.";
- return false;
- }
-
- // TODO(henrika): note that we use Wave today to query the number of
- // existing output devices.
- if (!audio_man->HasAudioOutputDevices()) {
- LOG(WARNING) << "No output devices detected.";
- return false;
- }
-
- if (WASAPIAudioOutputStream::HardwareChannelLayout() != kChannelLayout) {
- LOG(WARNING) << "This test requires stereo audio output.";
- return false;
- }
-
- return true;
-}
-
- // Convenience class which creates a default AudioOutputStream object but
-// also allows the user to modify the default settings.
-class AudioOutputStreamWrapper {
- public:
- explicit AudioOutputStreamWrapper(AudioManager* audio_manager)
- : com_init_(ScopedCOMInitializer::kMTA),
- audio_man_(audio_manager),
- format_(AudioParameters::AUDIO_PCM_LOW_LATENCY),
- channel_layout_(kChannelLayout),
- bits_per_sample_(kBitsPerSample) {
- // Use native/mixing sample rate and 10ms frame size as default.
- sample_rate_ = static_cast<int>(
- WASAPIAudioOutputStream::HardwareSampleRate(eConsole));
- samples_per_packet_ = sample_rate_ / 100;
- DCHECK(sample_rate_);
- }
-
- ~AudioOutputStreamWrapper() {}
-
- // Creates AudioOutputStream object using default parameters.
- AudioOutputStream* Create() {
- return CreateOutputStream();
- }
-
- // Creates AudioOutputStream object using non-default parameters where the
- // frame size is modified.
- AudioOutputStream* Create(int samples_per_packet) {
- samples_per_packet_ = samples_per_packet;
- return CreateOutputStream();
- }
-
- // Creates AudioOutputStream object using non-default parameters where the
- // sample rate and frame size are modified.
- AudioOutputStream* Create(int sample_rate, int samples_per_packet) {
- sample_rate_ = sample_rate;
- samples_per_packet_ = samples_per_packet;
- return CreateOutputStream();
- }
-
- AudioParameters::Format format() const { return format_; }
- int channels() const { return ChannelLayoutToChannelCount(channel_layout_); }
- int bits_per_sample() const { return bits_per_sample_; }
- int sample_rate() const { return sample_rate_; }
- int samples_per_packet() const { return samples_per_packet_; }
-
- private:
- AudioOutputStream* CreateOutputStream() {
- AudioOutputStream* aos = audio_man_->MakeAudioOutputStream(
- AudioParameters(format_, channel_layout_, sample_rate_,
- bits_per_sample_, samples_per_packet_));
- EXPECT_TRUE(aos);
- return aos;
- }
-
- ScopedCOMInitializer com_init_;
- AudioManager* audio_man_;
- AudioParameters::Format format_;
- ChannelLayout channel_layout_;
- int bits_per_sample_;
- int sample_rate_;
- int samples_per_packet_;
-};
-
-// Convenience method which creates a default AudioOutputStream object.
-static AudioOutputStream* CreateDefaultAudioOutputStream(
- AudioManager* audio_manager) {
- AudioOutputStreamWrapper aosw(audio_manager);
- AudioOutputStream* aos = aosw.Create();
- return aos;
-}
-
-// Verify that we can retrieve the current hardware/mixing sample rate
-// for all supported device roles. The ERole enumeration defines constants
-// that indicate the role that the system/user has assigned to an audio
-// endpoint device.
-// TODO(henrika): modify this test when we support full device enumeration.
-TEST(WASAPIAudioOutputStreamTest, HardwareSampleRate) {
- // Skip this test in exclusive mode since the resulting rate is only utilized
- // for shared mode streams.
- scoped_ptr<AudioManager> audio_manager(AudioManager::Create());
- if (!CanRunAudioTests(audio_manager.get()) || ExclusiveModeIsEnabled())
- return;
-
- ScopedCOMInitializer com_init(ScopedCOMInitializer::kMTA);
-
- // Default device intended for games, system notification sounds,
- // and voice commands.
- int fs = static_cast<int>(
- WASAPIAudioOutputStream::HardwareSampleRate(eConsole));
- EXPECT_GE(fs, 0);
-
- // Default communication device intended for e.g. VoIP communication.
- fs = static_cast<int>(
- WASAPIAudioOutputStream::HardwareSampleRate(eCommunications));
- EXPECT_GE(fs, 0);
-
- // Multimedia device for music, movies and live music recording.
- fs = static_cast<int>(
- WASAPIAudioOutputStream::HardwareSampleRate(eMultimedia));
- EXPECT_GE(fs, 0);
-}
-
-// Test Create(), Close() calling sequence.
-TEST(WASAPIAudioOutputStreamTest, CreateAndClose) {
- scoped_ptr<AudioManager> audio_manager(AudioManager::Create());
- if (!CanRunAudioTests(audio_manager.get()))
- return;
- AudioOutputStream* aos = CreateDefaultAudioOutputStream(audio_manager.get());
- aos->Close();
-}
-
-// Verify that the created object is configured to use the same number of
-// audio channels as is reported by the static HardwareChannelCount() method.
-TEST(WASAPIAudioOutputStreamTest, HardwareChannelCount) {
- scoped_ptr<AudioManager> audio_manager(AudioManager::Create());
- if (!CanRunAudioTests(audio_manager.get()))
- return;
-
- ScopedCOMInitializer com_init(ScopedCOMInitializer::kMTA);
-
- // First, verify that we can read a valid native/hardware channel-count.
- int hardware_channel_count = WASAPIAudioOutputStream::HardwareChannelCount();
- EXPECT_GE(hardware_channel_count, 1);
-
- AudioOutputStreamWrapper aosw(audio_manager.get());
- WASAPIAudioOutputStream* aos =
- static_cast<WASAPIAudioOutputStream*>(aosw.Create());
-
- // Next, ensure that the created output stream object is really using the
- // hardware channel-count.
- EXPECT_EQ(hardware_channel_count, aos->GetEndpointChannelCountForTesting());
- aos->Close();
-}
-
-// Test Open(), Close() calling sequence.
-TEST(WASAPIAudioOutputStreamTest, OpenAndClose) {
- scoped_ptr<AudioManager> audio_manager(AudioManager::Create());
- if (!CanRunAudioTests(audio_manager.get()))
- return;
- AudioOutputStream* aos = CreateDefaultAudioOutputStream(audio_manager.get());
- EXPECT_TRUE(aos->Open());
- aos->Close();
-}
-
-// Test Open(), Start(), Close() calling sequence.
-TEST(WASAPIAudioOutputStreamTest, OpenStartAndClose) {
- scoped_ptr<AudioManager> audio_manager(AudioManager::Create());
- if (!CanRunAudioTests(audio_manager.get()))
- return;
- AudioOutputStream* aos = CreateDefaultAudioOutputStream(audio_manager.get());
- EXPECT_TRUE(aos->Open());
- MockAudioSourceCallback source;
- EXPECT_CALL(source, OnError(aos, _))
- .Times(0);
- aos->Start(&source);
- aos->Close();
-}
-
-// Test Open(), Start(), Stop(), Close() calling sequence.
-TEST(WASAPIAudioOutputStreamTest, OpenStartStopAndClose) {
- scoped_ptr<AudioManager> audio_manager(AudioManager::Create());
- if (!CanRunAudioTests(audio_manager.get()))
- return;
- AudioOutputStream* aos = CreateDefaultAudioOutputStream(audio_manager.get());
- EXPECT_TRUE(aos->Open());
- MockAudioSourceCallback source;
- EXPECT_CALL(source, OnError(aos, _))
- .Times(0);
- aos->Start(&source);
- aos->Stop();
- aos->Close();
-}
-
-// Test SetVolume(), GetVolume()
-TEST(WASAPIAudioOutputStreamTest, Volume) {
- scoped_ptr<AudioManager> audio_manager(AudioManager::Create());
- if (!CanRunAudioTests(audio_manager.get()))
- return;
- AudioOutputStream* aos = CreateDefaultAudioOutputStream(audio_manager.get());
-
- // Initial volume should be full volume (1.0).
- double volume = 0.0;
- aos->GetVolume(&volume);
- EXPECT_EQ(1.0, volume);
-
- // Verify some valid volume settings.
- aos->SetVolume(0.0);
- aos->GetVolume(&volume);
- EXPECT_EQ(0.0, volume);
-
- aos->SetVolume(0.5);
- aos->GetVolume(&volume);
- EXPECT_EQ(0.5, volume);
-
- aos->SetVolume(1.0);
- aos->GetVolume(&volume);
- EXPECT_EQ(1.0, volume);
-
- // Ensure that invalid volume settings have no effect.
- aos->SetVolume(1.5);
- aos->GetVolume(&volume);
- EXPECT_EQ(1.0, volume);
-
- aos->SetVolume(-0.5);
- aos->GetVolume(&volume);
- EXPECT_EQ(1.0, volume);
-
- aos->Close();
-}
-
-// Test some additional calling sequences.
-TEST(WASAPIAudioOutputStreamTest, MiscCallingSequences) {
- scoped_ptr<AudioManager> audio_manager(AudioManager::Create());
- if (!CanRunAudioTests(audio_manager.get()))
- return;
-
- AudioOutputStream* aos = CreateDefaultAudioOutputStream(audio_manager.get());
- WASAPIAudioOutputStream* waos = static_cast<WASAPIAudioOutputStream*>(aos);
-
- // Open(), Open() is a valid calling sequence (second call does nothing).
- EXPECT_TRUE(aos->Open());
- EXPECT_TRUE(aos->Open());
-
- MockAudioSourceCallback source;
-
- // Start(), Start() is a valid calling sequence (second call does nothing).
- aos->Start(&source);
- EXPECT_TRUE(waos->started());
- aos->Start(&source);
- EXPECT_TRUE(waos->started());
-
- // Stop(), Stop() is a valid calling sequence (second call does nothing).
- aos->Stop();
- EXPECT_FALSE(waos->started());
- aos->Stop();
- EXPECT_FALSE(waos->started());
-
- // Start(), Stop(), Start(), Stop().
- aos->Start(&source);
- EXPECT_TRUE(waos->started());
- aos->Stop();
- EXPECT_FALSE(waos->started());
- aos->Start(&source);
- EXPECT_TRUE(waos->started());
- aos->Stop();
- EXPECT_FALSE(waos->started());
-
- aos->Close();
-}
-
-// Use default packet size (10ms) and verify that rendering starts.
-TEST(WASAPIAudioOutputStreamTest, PacketSizeInMilliseconds) {
- scoped_ptr<AudioManager> audio_manager(AudioManager::Create());
- if (!CanRunAudioTests(audio_manager.get()))
- return;
-
- MessageLoopForUI loop;
- MockAudioSourceCallback source;
-
- // Create default WASAPI output stream which plays out in stereo using
- // the shared mixing rate. The default buffer size is 10ms.
- AudioOutputStreamWrapper aosw(audio_manager.get());
- AudioOutputStream* aos = aosw.Create();
- EXPECT_TRUE(aos->Open());
-
- // Derive the expected size in bytes of each packet.
- uint32 bytes_per_packet = aosw.channels() * aosw.samples_per_packet() *
- (aosw.bits_per_sample() / 8);
-
- // Set up expected minimum delay estimation.
- AudioBuffersState state(0, bytes_per_packet);
-
- // Wait for the first callback and verify its parameters.
- EXPECT_CALL(source, OnMoreData(NotNull(), HasValidDelay(state)))
- .WillOnce(DoAll(
- QuitLoop(loop.message_loop_proxy()),
- Return(aosw.samples_per_packet())));
-
- aos->Start(&source);
- loop.PostDelayedTask(FROM_HERE, MessageLoop::QuitClosure(),
- TestTimeouts::action_timeout());
- loop.Run();
- aos->Stop();
- aos->Close();
-}
-
- // Use a fixed packet size (independent of sample rate) and verify
-// that rendering starts.
-TEST(WASAPIAudioOutputStreamTest, PacketSizeInSamples) {
- scoped_ptr<AudioManager> audio_manager(AudioManager::Create());
- if (!CanRunAudioTests(audio_manager.get()))
- return;
-
- MessageLoopForUI loop;
- MockAudioSourceCallback source;
-
- // Create default WASAPI output stream which reads data in stereo using
- // the native mixing rate and channel count. The buffer size is set to
- // 1024 samples.
- AudioOutputStreamWrapper aosw(audio_manager.get());
- AudioOutputStream* aos = aosw.Create(1024);
- EXPECT_TRUE(aos->Open());
-
- // Derive the expected size in bytes of each packet.
- uint32 bytes_per_packet = aosw.channels() * aosw.samples_per_packet() *
- (aosw.bits_per_sample() / 8);
-
- // Set up expected minimum delay estimation.
- AudioBuffersState state(0, bytes_per_packet);
-
- // Ensure that callbacks start correctly.
- EXPECT_CALL(source, OnMoreData(NotNull(), HasValidDelay(state)))
- .WillOnce(DoAll(
- QuitLoop(loop.message_loop_proxy()),
- Return(aosw.samples_per_packet())))
- .WillRepeatedly(Return(aosw.samples_per_packet()));
-
- aos->Start(&source);
- loop.PostDelayedTask(FROM_HERE, MessageLoop::QuitClosure(),
- TestTimeouts::action_timeout());
- loop.Run();
- aos->Stop();
- aos->Close();
-}
-
-// This test is intended for manual tests and should only be enabled
-// when it is required to play out data from a local PCM file.
-// By default, GTest will print out YOU HAVE 1 DISABLED TEST.
-// To include disabled tests in test execution, just invoke the test program
-// with --gtest_also_run_disabled_tests or set the GTEST_ALSO_RUN_DISABLED_TESTS
-// environment variable to a value greater than 0.
-// The test files are approximately 20 seconds long.
-TEST(WASAPIAudioOutputStreamTest, DISABLED_ReadFromStereoFile) {
- scoped_ptr<AudioManager> audio_manager(AudioManager::Create());
- if (!CanRunAudioTests(audio_manager.get()))
- return;
-
- AudioOutputStreamWrapper aosw(audio_manager.get());
- AudioOutputStream* aos = aosw.Create();
- EXPECT_TRUE(aos->Open());
-
- std::string file_name;
- if (aosw.sample_rate() == 48000) {
- file_name = kSpeechFile_16b_s_48k;
- } else if (aosw.sample_rate() == 44100) {
- file_name = kSpeechFile_16b_s_44k;
- } else if (aosw.sample_rate() == 96000) {
- // Use 48kHz file at 96kHz as well. Will sound like Donald Duck.
- file_name = kSpeechFile_16b_s_48k;
- } else {
- FAIL() << "This test supports 44.1, 48kHz and 96kHz only.";
- return;
- }
- ReadFromFileAudioSource file_source(file_name);
-
- LOG(INFO) << "File name : " << file_name.c_str();
- LOG(INFO) << "Sample rate : " << aosw.sample_rate();
- LOG(INFO) << "Bits per sample: " << aosw.bits_per_sample();
- LOG(INFO) << "#channels : " << aosw.channels();
- LOG(INFO) << "File size : " << file_source.file_size();
- LOG(INFO) << "#file segments : " << kNumFileSegments;
- LOG(INFO) << ">> Listen to the stereo file while playing...";
-
- for (int i = 0; i < kNumFileSegments; i++) {
- // Each segment will start with a short (~20ms) block of zeros, hence
- // some short glitches might be heard in this test if kNumFileSegments
- // is larger than one. The exact length of the silence period depends on
- // the selected sample rate.
- aos->Start(&file_source);
- base::PlatformThread::Sleep(
- base::TimeDelta::FromMilliseconds(kFileDurationMs / kNumFileSegments));
- aos->Stop();
- }
-
- LOG(INFO) << ">> Stereo file playout has stopped.";
- aos->Close();
-}
-
-// Verify that we can open the output stream in exclusive mode using a
-// certain set of audio parameters and a sample rate of 48kHz.
- // The expected outcomes of each setting in this test have been derived
-// manually using log outputs (--v=1).
-TEST(WASAPIAudioOutputStreamTest, ExclusiveModeBufferSizesAt48kHz) {
- if (!ExclusiveModeIsEnabled())
- return;
-
- scoped_ptr<AudioManager> audio_manager(AudioManager::Create());
- if (!CanRunAudioTests(audio_manager.get()))
- return;
-
- AudioOutputStreamWrapper aosw(audio_manager.get());
-
- // 10ms @ 48kHz shall work.
- // Note that, this is the same size as we can use for shared-mode streaming
- // but here the endpoint buffer delay is only 10ms instead of 20ms.
- AudioOutputStream* aos = aosw.Create(48000, 480);
- EXPECT_TRUE(aos->Open());
- aos->Close();
-
- // 5ms @ 48kHz does not work due to misalignment.
- // This test will propose an aligned buffer size of 5.3333ms.
- // Note that we must call Close() even if Open() fails since Close() also
- // deletes the object and we want to create a new object in the next test.
- aos = aosw.Create(48000, 240);
- EXPECT_FALSE(aos->Open());
- aos->Close();
-
- // 5.3333ms @ 48kHz should work (see test above).
- aos = aosw.Create(48000, 256);
- EXPECT_TRUE(aos->Open());
- aos->Close();
-
- // 2.6667ms is smaller than the minimum supported size (=3ms).
- aos = aosw.Create(48000, 128);
- EXPECT_FALSE(aos->Open());
- aos->Close();
-
- // 3ms does not correspond to an aligned buffer size.
- // This test will propose an aligned buffer size of 3.3333ms.
- aos = aosw.Create(48000, 144);
- EXPECT_FALSE(aos->Open());
- aos->Close();
-
- // 3.3333ms @ 48kHz <=> smallest possible buffer size we can use.
- aos = aosw.Create(48000, 160);
- EXPECT_TRUE(aos->Open());
- aos->Close();
-}
-
-// Verify that we can open the output stream in exclusive mode using a
-// certain set of audio parameters and a sample rate of 44.1kHz.
- // The expected outcomes of each setting in this test have been derived
-// manually using log outputs (--v=1).
-TEST(WASAPIAudioOutputStreamTest, ExclusiveModeBufferSizesAt44kHz) {
- if (!ExclusiveModeIsEnabled())
- return;
-
- scoped_ptr<AudioManager> audio_manager(AudioManager::Create());
- if (!CanRunAudioTests(audio_manager.get()))
- return;
-
- AudioOutputStreamWrapper aosw(audio_manager.get());
-
- // 10ms @ 44.1kHz does not work due to misalignment.
- // This test will propose an aligned buffer size of 10.1587ms.
- AudioOutputStream* aos = aosw.Create(44100, 441);
- EXPECT_FALSE(aos->Open());
- aos->Close();
-
- // 10.1587ms @ 44.1kHz shall work (see test above).
- aos = aosw.Create(44100, 448);
- EXPECT_TRUE(aos->Open());
- aos->Close();
-
- // 5.8050ms @ 44.1kHz should work.
- aos = aosw.Create(44100, 256);
- EXPECT_TRUE(aos->Open());
- aos->Close();
-
- // 4.9887ms @ 44.1kHz does not work due to misalignment.
- // This test will propose an aligned buffer size of 5.0794ms.
- // Note that we must call Close() even if Open() fails since Close() also
- // deletes the object and we want to create a new object in the next test.
- aos = aosw.Create(44100, 220);
- EXPECT_FALSE(aos->Open());
- aos->Close();
-
- // 5.0794ms @ 44.1kHz shall work (see test above).
- aos = aosw.Create(44100, 224);
- EXPECT_TRUE(aos->Open());
- aos->Close();
-
- // 2.9025ms is smaller than the minimum supported size (=3ms).
- aos = aosw.Create(44100, 132);
- EXPECT_FALSE(aos->Open());
- aos->Close();
-
- // 3.01587ms is larger than the minimum size but is not aligned.
- // This test will propose an aligned buffer size of 3.6281ms.
- aos = aosw.Create(44100, 133);
- EXPECT_FALSE(aos->Open());
- aos->Close();
-
- // 3.6281ms @ 44.1kHz <=> smallest possible buffer size we can use.
- aos = aosw.Create(44100, 160);
- EXPECT_TRUE(aos->Open());
- aos->Close();
-}
-
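The expected outcomes in the two exclusive-mode tests above follow directly from duration_ms = 1000 * frames / sample_rate; a throwaway sketch that reproduces the figures mentioned in the test comments:

#include <cstdio>

// Buffer duration in milliseconds for a given frame count and sample rate.
double BufferMs(int frames, int sample_rate) {
  return 1000.0 * frames / sample_rate;
}

int main() {
  std::printf("160 frames @ 48 kHz  : %.4f ms\n", BufferMs(160, 48000));   // 3.3333
  std::printf("256 frames @ 48 kHz  : %.4f ms\n", BufferMs(256, 48000));   // 5.3333
  std::printf("448 frames @ 44.1 kHz: %.4f ms\n", BufferMs(448, 44100));   // 10.1587
  std::printf("160 frames @ 44.1 kHz: %.4f ms\n", BufferMs(160, 44100));   // 3.6281
  return 0;
}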
-// Verify that we can open and start the output stream in exclusive mode at
-// the lowest possible delay at 48kHz.
-TEST(WASAPIAudioOutputStreamTest, ExclusiveModeMinBufferSizeAt48kHz) {
- if (!ExclusiveModeIsEnabled())
- return;
-
- scoped_ptr<AudioManager> audio_manager(AudioManager::Create());
- if (!CanRunAudioTests(audio_manager.get()))
- return;
-
- MessageLoopForUI loop;
- MockAudioSourceCallback source;
-
- // Create exclusive-mode WASAPI output stream which plays out in stereo
- // using the minimum buffer size at 48kHz sample rate.
- AudioOutputStreamWrapper aosw(audio_manager.get());
- AudioOutputStream* aos = aosw.Create(48000, 160);
- EXPECT_TRUE(aos->Open());
-
- // Derive the expected size in bytes of each packet.
- uint32 bytes_per_packet = aosw.channels() * aosw.samples_per_packet() *
- (aosw.bits_per_sample() / 8);
-
- // Set up expected minimum delay estimation.
- AudioBuffersState state(0, bytes_per_packet);
-
- // Wait for the first callback and verify its parameters.
- EXPECT_CALL(source, OnMoreData(NotNull(), HasValidDelay(state)))
- .WillOnce(DoAll(
- QuitLoop(loop.message_loop_proxy()),
- Return(aosw.samples_per_packet())))
- .WillRepeatedly(Return(aosw.samples_per_packet()));
-
- aos->Start(&source);
- loop.PostDelayedTask(FROM_HERE, MessageLoop::QuitClosure(),
- TestTimeouts::action_timeout());
- loop.Run();
- aos->Stop();
- aos->Close();
-}
-
-// Verify that we can open and start the output stream in exclusive mode at
-// the lowest possible delay at 44.1kHz.
-TEST(WASAPIAudioOutputStreamTest, ExclusiveModeMinBufferSizeAt44kHz) {
- if (!ExclusiveModeIsEnabled())
- return;
-
- scoped_ptr<AudioManager> audio_manager(AudioManager::Create());
- if (!CanRunAudioTests(audio_manager.get()))
- return;
-
- MessageLoopForUI loop;
- MockAudioSourceCallback source;
-
- // Create exclusive-mode WASAPI output stream which plays out in stereo
- // using the minimum buffer size at 44.1kHz sample rate.
- AudioOutputStreamWrapper aosw(audio_manager.get());
- AudioOutputStream* aos = aosw.Create(44100, 160);
- EXPECT_TRUE(aos->Open());
-
- // Derive the expected size in bytes of each packet.
- uint32 bytes_per_packet = aosw.channels() * aosw.samples_per_packet() *
- (aosw.bits_per_sample() / 8);
-
- // Set up expected minimum delay estimation.
- AudioBuffersState state(0, bytes_per_packet);
-
- // Wait for the first callback and verify its parameters.
- EXPECT_CALL(source, OnMoreData(NotNull(), HasValidDelay(state)))
- .WillOnce(DoAll(
- QuitLoop(loop.message_loop_proxy()),
- Return(aosw.samples_per_packet())))
- .WillRepeatedly(Return(aosw.samples_per_packet()));
-
- aos->Start(&source);
- loop.PostDelayedTask(FROM_HERE, MessageLoop::QuitClosure(),
- TestTimeouts::action_timeout());
- loop.Run();
- aos->Stop();
- aos->Close();
-}
-
-} // namespace media
diff --git a/src/media/audio/win/audio_manager_win.cc b/src/media/audio/win/audio_manager_win.cc
deleted file mode 100644
index 9b788bf..0000000
--- a/src/media/audio/win/audio_manager_win.cc
+++ /dev/null
@@ -1,376 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "media/audio/audio_io.h"
-
-#include <windows.h>
-#include <objbase.h> // This has to be before initguid.h
-#include <initguid.h>
-#include <mmsystem.h>
-#include <setupapi.h>
-
-#include "base/bind.h"
-#include "base/bind_helpers.h"
-#include "base/command_line.h"
-#include "base/file_path.h"
-#include "base/memory/scoped_ptr.h"
-#include "base/message_loop.h"
-#include "base/path_service.h"
-#include "base/process_util.h"
-#include "base/string_number_conversions.h"
-#include "base/string_util.h"
-#include "media/audio/audio_util.h"
-#include "media/audio/win/audio_device_listener_win.h"
-#include "media/audio/win/audio_low_latency_input_win.h"
-#include "media/audio/win/audio_low_latency_output_win.h"
-#include "media/audio/win/audio_manager_win.h"
-#include "media/audio/win/audio_unified_win.h"
-#include "media/audio/win/core_audio_util_win.h"
-#include "media/audio/win/device_enumeration_win.h"
-#include "media/audio/win/wavein_input_win.h"
-#include "media/audio/win/waveout_output_win.h"
-#include "media/base/bind_to_loop.h"
-#include "media/base/limits.h"
-#include "media/base/media_switches.h"
-
-// Libraries required for the SetupAPI and Wbem APIs used here.
-#pragma comment(lib, "setupapi.lib")
-
-// The following are defined in various DDK headers, and we (re)define them here
-// to avoid adding the DDK as a chrome dependency.
-#define DRV_QUERYDEVICEINTERFACE 0x80c
-#define DRVM_MAPPER_PREFERRED_GET 0x2015
-#define DRV_QUERYDEVICEINTERFACESIZE 0x80d
-DEFINE_GUID(AM_KSCATEGORY_AUDIO, 0x6994ad04, 0x93ef, 0x11d0,
- 0xa3, 0xcc, 0x00, 0xa0, 0xc9, 0x22, 0x31, 0x96);
-
-namespace media {
-
-// Maximum number of output streams that can be open simultaneously.
-static const int kMaxOutputStreams = 50;
-
-// Up to 8 channels can be passed to the driver. This should work, given the
-// right drivers, but graceful error handling is needed.
-static const int kWinMaxChannels = 8;
-
-// We use 3 buffers for recording audio so that if a recording callback takes
-// some time to return we won't lose audio. More buffers while recording are
-// ok because they don't introduce any delay in recording, unlike in playback
-// where you first need to fill in that number of buffers before starting to
-// play.
-static const int kNumInputBuffers = 3;
-
-static int GetVersionPartAsInt(DWORDLONG num) {
- return static_cast<int>(num & 0xffff);
-}
-
-// Returns a string containing the given device's description and installed
-// driver version.
-static string16 GetDeviceAndDriverInfo(HDEVINFO device_info,
- SP_DEVINFO_DATA* device_data) {
- // Save the old install params setting and set a flag for the
- // SetupDiBuildDriverInfoList below to return only the installed drivers.
- SP_DEVINSTALL_PARAMS old_device_install_params;
- old_device_install_params.cbSize = sizeof(old_device_install_params);
- SetupDiGetDeviceInstallParams(device_info, device_data,
- &old_device_install_params);
- SP_DEVINSTALL_PARAMS device_install_params = old_device_install_params;
- device_install_params.FlagsEx |= DI_FLAGSEX_INSTALLEDDRIVER;
- SetupDiSetDeviceInstallParams(device_info, device_data,
- &device_install_params);
-
- SP_DRVINFO_DATA driver_data;
- driver_data.cbSize = sizeof(driver_data);
- string16 device_and_driver_info;
- if (SetupDiBuildDriverInfoList(device_info, device_data,
- SPDIT_COMPATDRIVER)) {
- if (SetupDiEnumDriverInfo(device_info, device_data, SPDIT_COMPATDRIVER, 0,
- &driver_data)) {
- DWORDLONG version = driver_data.DriverVersion;
- device_and_driver_info = string16(driver_data.Description) + L" v" +
- base::IntToString16(GetVersionPartAsInt((version >> 48))) + L"." +
- base::IntToString16(GetVersionPartAsInt((version >> 32))) + L"." +
- base::IntToString16(GetVersionPartAsInt((version >> 16))) + L"." +
- base::IntToString16(GetVersionPartAsInt(version));
- }
- SetupDiDestroyDriverInfoList(device_info, device_data, SPDIT_COMPATDRIVER);
- }
-
- SetupDiSetDeviceInstallParams(device_info, device_data,
- &old_device_install_params);
-
- return device_and_driver_info;
-}
-
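GetDeviceAndDriverInfo() above formats the packed 64-bit DriverVersion as four 16-bit fields. A standalone sketch of that unpacking, using a hypothetical version value:

#include <cstdint>
#include <cstdio>

int main() {
  // Hypothetical DriverVersion: four 16-bit fields packed into 64 bits.
  const uint64_t version = 0x0006000112340ABCULL;
  std::printf("%u.%u.%u.%u\n",
              static_cast<unsigned>((version >> 48) & 0xffff),
              static_cast<unsigned>((version >> 32) & 0xffff),
              static_cast<unsigned>((version >> 16) & 0xffff),
              static_cast<unsigned>(version & 0xffff));  // prints 6.1.4660.2748
  return 0;
}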
-AudioManagerWin::AudioManagerWin() {
- if (!CoreAudioUtil::IsSupported()) {
- // Use the Wave API for device enumeration if XP or lower.
- enumeration_type_ = kWaveEnumeration;
- } else {
- // Use the MMDevice API for device enumeration if Vista or higher.
- enumeration_type_ = kMMDeviceEnumeration;
- }
-
- SetMaxOutputStreamsAllowed(kMaxOutputStreams);
-
- // Task must be posted last to avoid races from handing out "this" to the
- // audio thread.
- GetMessageLoop()->PostTask(FROM_HERE, base::Bind(
- &AudioManagerWin::CreateDeviceListener, base::Unretained(this)));
-}
-
-AudioManagerWin::~AudioManagerWin() {
- // It's safe to post a task here since Shutdown() will wait for all tasks to
- // complete before returning.
- GetMessageLoop()->PostTask(FROM_HERE, base::Bind(
- &AudioManagerWin::DestroyDeviceListener, base::Unretained(this)));
- Shutdown();
-}
-
-bool AudioManagerWin::HasAudioOutputDevices() {
- return (::waveOutGetNumDevs() != 0);
-}
-
-bool AudioManagerWin::HasAudioInputDevices() {
- return (::waveInGetNumDevs() != 0);
-}
-
-void AudioManagerWin::CreateDeviceListener() {
- // AudioDeviceListenerWin must be initialized on a COM thread and should only
- // be used if WASAPI / Core Audio is supported.
- if (CoreAudioUtil::IsSupported()) {
- output_device_listener_.reset(new AudioDeviceListenerWin(BindToLoop(
- GetMessageLoop(), base::Bind(
- &AudioManagerWin::NotifyAllOutputDeviceChangeListeners,
- base::Unretained(this)))));
- }
-}
-
-void AudioManagerWin::DestroyDeviceListener() {
- output_device_listener_.reset();
-}
-
-string16 AudioManagerWin::GetAudioInputDeviceModel() {
- // Get the default audio capture device and its device interface name.
- DWORD device_id = 0;
- waveInMessage(reinterpret_cast<HWAVEIN>(WAVE_MAPPER),
- DRVM_MAPPER_PREFERRED_GET,
- reinterpret_cast<DWORD_PTR>(&device_id), NULL);
- ULONG device_interface_name_size = 0;
- waveInMessage(reinterpret_cast<HWAVEIN>(device_id),
- DRV_QUERYDEVICEINTERFACESIZE,
- reinterpret_cast<DWORD_PTR>(&device_interface_name_size), 0);
- size_t bytes_in_char16 = sizeof(string16::value_type);
- DCHECK_EQ(0u, device_interface_name_size % bytes_in_char16);
- if (device_interface_name_size <= bytes_in_char16)
- return string16(); // No audio capture device.
-
- string16 device_interface_name;
- string16::value_type* name_ptr = WriteInto(&device_interface_name,
- device_interface_name_size / bytes_in_char16);
- waveInMessage(reinterpret_cast<HWAVEIN>(device_id),
- DRV_QUERYDEVICEINTERFACE,
- reinterpret_cast<DWORD_PTR>(name_ptr),
- static_cast<DWORD_PTR>(device_interface_name_size));
-
- // Enumerate all audio devices and find the one matching the above device
- // interface name.
- HDEVINFO device_info = SetupDiGetClassDevs(
- &AM_KSCATEGORY_AUDIO, 0, 0, DIGCF_DEVICEINTERFACE | DIGCF_PRESENT);
- if (device_info == INVALID_HANDLE_VALUE)
- return string16();
-
- DWORD interface_index = 0;
- SP_DEVICE_INTERFACE_DATA interface_data;
- interface_data.cbSize = sizeof(interface_data);
- while (SetupDiEnumDeviceInterfaces(device_info, 0, &AM_KSCATEGORY_AUDIO,
- interface_index++, &interface_data)) {
- // Query the size of the struct, allocate it and then query the data.
- SP_DEVINFO_DATA device_data;
- device_data.cbSize = sizeof(device_data);
- DWORD interface_detail_size = 0;
- SetupDiGetDeviceInterfaceDetail(device_info, &interface_data, 0, 0,
- &interface_detail_size, &device_data);
- if (!interface_detail_size)
- continue;
-
- scoped_array<char> interface_detail_buffer(new char[interface_detail_size]);
- SP_DEVICE_INTERFACE_DETAIL_DATA* interface_detail =
- reinterpret_cast<SP_DEVICE_INTERFACE_DETAIL_DATA*>(
- interface_detail_buffer.get());
- interface_detail->cbSize = interface_detail_size;
- if (!SetupDiGetDeviceInterfaceDetail(device_info, &interface_data,
- interface_detail,
- interface_detail_size, NULL,
- &device_data))
- return string16();
-
- bool device_found = (device_interface_name == interface_detail->DevicePath);
-
- if (device_found)
- return GetDeviceAndDriverInfo(device_info, &device_data);
- }
-
- return string16();
-}
-
-bool AudioManagerWin::CanShowAudioInputSettings() {
- return true;
-}
-
-void AudioManagerWin::ShowAudioInputSettings() {
- std::wstring program;
- std::string argument;
- if (!CoreAudioUtil::IsSupported()) {
- program = L"sndvol32.exe";
- argument = "-R";
- } else {
- program = L"control.exe";
- argument = "mmsys.cpl,,1";
- }
-
- FilePath path;
- PathService::Get(base::DIR_SYSTEM, &path);
- path = path.Append(program);
- CommandLine command_line(path);
- command_line.AppendArg(argument);
- base::LaunchProcess(command_line, base::LaunchOptions(), NULL);
-}
-
-void AudioManagerWin::GetAudioInputDeviceNames(
- media::AudioDeviceNames* device_names) {
- DCHECK(enumeration_type() != kUninitializedEnumeration);
- // Enumerate all active audio-endpoint capture devices.
- if (enumeration_type() == kWaveEnumeration) {
- // Utilize the Wave API for Windows XP.
- media::GetInputDeviceNamesWinXP(device_names);
- } else {
- // Utilize the MMDevice API (part of Core Audio) for Vista and higher.
- media::GetInputDeviceNamesWin(device_names);
- }
-
- // Always add default device parameters as first element.
- if (!device_names->empty()) {
- media::AudioDeviceName name;
- name.device_name = AudioManagerBase::kDefaultDeviceName;
- name.unique_id = AudioManagerBase::kDefaultDeviceId;
- device_names->push_front(name);
- }
-}
-
-// Factory for the implementations of AudioOutputStream for AUDIO_PCM_LINEAR
-// mode.
-// - PCMWaveOutAudioOutputStream: Based on the waveOut API.
-AudioOutputStream* AudioManagerWin::MakeLinearOutputStream(
- const AudioParameters& params) {
- DCHECK_EQ(AudioParameters::AUDIO_PCM_LINEAR, params.format());
- if (params.channels() > kWinMaxChannels)
- return NULL;
-
- return new PCMWaveOutAudioOutputStream(this,
- params,
- media::NumberOfWaveOutBuffers(),
- WAVE_MAPPER);
-}
-
-// Factory for the implementations of AudioOutputStream for
-// AUDIO_PCM_LOW_LATENCY mode. Two implementations should suffice most
-// windows user's needs.
-// - PCMWaveOutAudioOutputStream: Based on the waveOut API.
-// - WASAPIAudioOutputStream: Based on Core Audio (WASAPI) API.
-AudioOutputStream* AudioManagerWin::MakeLowLatencyOutputStream(
- const AudioParameters& params) {
- DCHECK_EQ(AudioParameters::AUDIO_PCM_LOW_LATENCY, params.format());
- if (params.channels() > kWinMaxChannels)
- return NULL;
-
- if (!CoreAudioUtil::IsSupported()) {
- // Fall back to Windows Wave implementation on Windows XP or lower.
- DVLOG(1) << "Using WaveOut since WASAPI requires at least Vista.";
- return new PCMWaveOutAudioOutputStream(this, params, 2, WAVE_MAPPER);
- }
-
- // TODO(henrika): remove once we properly handle input device selection.
- if (CommandLine::ForCurrentProcess()->HasSwitch(
- switches::kEnableWebAudioInput)) {
- if (WASAPIUnifiedStream::HasUnifiedDefaultIO()) {
- DVLOG(1) << "WASAPIUnifiedStream is created.";
- return new WASAPIUnifiedStream(this, params);
- }
- LOG(WARNING) << "Unified audio I/O is not supported.";
- }
-
- return new WASAPIAudioOutputStream(this, params, eConsole);
-}
-
-// Factory for the implementations of AudioInputStream for AUDIO_PCM_LINEAR
-// mode.
-AudioInputStream* AudioManagerWin::MakeLinearInputStream(
- const AudioParameters& params, const std::string& device_id) {
- DCHECK_EQ(AudioParameters::AUDIO_PCM_LINEAR, params.format());
- return CreatePCMWaveInAudioInputStream(params, device_id);
-}
-
-// Factory for the implementations of AudioInputStream for
-// AUDIO_PCM_LOW_LATENCY mode.
-AudioInputStream* AudioManagerWin::MakeLowLatencyInputStream(
- const AudioParameters& params, const std::string& device_id) {
- DCHECK_EQ(AudioParameters::AUDIO_PCM_LOW_LATENCY, params.format());
- AudioInputStream* stream = NULL;
- if (!CoreAudioUtil::IsSupported()) {
- // Fall back to Windows Wave implementation on Windows XP or lower.
- DVLOG(1) << "Using WaveIn since WASAPI requires at least Vista.";
- stream = CreatePCMWaveInAudioInputStream(params, device_id);
- } else {
- stream = new WASAPIAudioInputStream(this, params, device_id);
- }
-
- return stream;
-}
-
-AudioInputStream* AudioManagerWin::CreatePCMWaveInAudioInputStream(
- const AudioParameters& params,
- const std::string& device_id) {
- std::string xp_device_id = device_id;
- if (device_id != AudioManagerBase::kDefaultDeviceId &&
- enumeration_type_ == kMMDeviceEnumeration) {
- xp_device_id = media::ConvertToWinXPDeviceId(device_id);
- if (xp_device_id.empty()) {
- DLOG(ERROR) << "Cannot find a waveIn device which matches the device ID "
- << device_id;
- return NULL;
- }
- }
-
- return new PCMWaveInAudioInputStream(this, params, kNumInputBuffers,
- xp_device_id);
-}
-
-// static
-AudioManager* CreateAudioManager() {
- return new AudioManagerWin();
-}
-
-AudioParameters AudioManagerWin::GetPreferredLowLatencyOutputStreamParameters(
- const AudioParameters& input_params) {
- // If WASAPI isn't supported we'll fall back to WaveOut, which will take care
- // of resampling and bits-per-sample changes. By setting these equal to the
- // input values, AudioOutputResampler will skip resampling and bits-per-sample
- // conversion (since the input parameters will match the output parameters).
- int sample_rate = input_params.sample_rate();
- int bits_per_sample = input_params.bits_per_sample();
- ChannelLayout channel_layout = input_params.channel_layout();
- if (CoreAudioUtil::IsSupported()) {
- sample_rate = GetAudioHardwareSampleRate();
- bits_per_sample = 16;
- channel_layout = WASAPIAudioOutputStream::HardwareChannelLayout();
- }
-
- // TODO(dalecurtis): This should include hardware bits per channel eventually.
- return AudioParameters(
- AudioParameters::AUDIO_PCM_LOW_LATENCY, channel_layout,
- sample_rate, bits_per_sample, GetAudioHardwareBufferSize());
-}
-
-} // namespace media
diff --git a/src/media/audio/win/audio_manager_win.h b/src/media/audio/win/audio_manager_win.h
deleted file mode 100644
index 4ce2fbc..0000000
--- a/src/media/audio/win/audio_manager_win.h
+++ /dev/null
@@ -1,84 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef MEDIA_AUDIO_WIN_AUDIO_MANAGER_WIN_H_
-#define MEDIA_AUDIO_WIN_AUDIO_MANAGER_WIN_H_
-
-#include <string>
-
-#include "media/audio/audio_manager_base.h"
-
-namespace media {
-
-class AudioDeviceListenerWin;
-
-// Windows implementation of the AudioManager singleton. This class is internal
-// to the audio output and only internal users can call methods not exposed by
-// the AudioManager class.
-class MEDIA_EXPORT AudioManagerWin : public AudioManagerBase {
- public:
- AudioManagerWin();
-
- // Implementation of AudioManager.
- virtual bool HasAudioOutputDevices() OVERRIDE;
- virtual bool HasAudioInputDevices() OVERRIDE;
- virtual string16 GetAudioInputDeviceModel() OVERRIDE;
- virtual bool CanShowAudioInputSettings() OVERRIDE;
- virtual void ShowAudioInputSettings() OVERRIDE;
- virtual void GetAudioInputDeviceNames(media::AudioDeviceNames* device_names)
- OVERRIDE;
-
- // Implementation of AudioManagerBase.
- virtual AudioOutputStream* MakeLinearOutputStream(
- const AudioParameters& params) OVERRIDE;
- virtual AudioOutputStream* MakeLowLatencyOutputStream(
- const AudioParameters& params) OVERRIDE;
- virtual AudioInputStream* MakeLinearInputStream(
- const AudioParameters& params, const std::string& device_id) OVERRIDE;
- virtual AudioInputStream* MakeLowLatencyInputStream(
- const AudioParameters& params, const std::string& device_id) OVERRIDE;
- virtual AudioParameters GetPreferredLowLatencyOutputStreamParameters(
- const AudioParameters& input_params) OVERRIDE;
-
- protected:
- virtual ~AudioManagerWin();
-
- private:
- enum EnumerationType {
- kUninitializedEnumeration = 0,
- kMMDeviceEnumeration,
- kWaveEnumeration,
- };
-
- // Allow unit test to modify the utilized enumeration API.
- friend class AudioInputDeviceTest;
-
- EnumerationType enumeration_type_;
- EnumerationType enumeration_type() { return enumeration_type_; }
- void SetEnumerationType(EnumerationType type) {
- enumeration_type_ = type;
- }
-
- // Returns a PCMWaveInAudioInputStream instance or NULL on failure.
- // This method converts MMDevice-style device ID to WaveIn-style device ID if
- // necessary.
- // (Please see device_enumeration_win.h for more info about the two kinds of
- // device IDs.)
- AudioInputStream* CreatePCMWaveInAudioInputStream(
- const AudioParameters& params,
- const std::string& device_id);
-
- // Helper methods for constructing AudioDeviceListenerWin on the audio thread.
- void CreateDeviceListener();
- void DestroyDeviceListener();
-
- // Listen for output device changes.
- scoped_ptr<AudioDeviceListenerWin> output_device_listener_;
-
- DISALLOW_COPY_AND_ASSIGN(AudioManagerWin);
-};
-
-} // namespace media
-
-#endif // MEDIA_AUDIO_WIN_AUDIO_MANAGER_WIN_H_
diff --git a/src/media/audio/win/audio_output_win_unittest.cc b/src/media/audio/win/audio_output_win_unittest.cc
deleted file mode 100644
index 40b4d81..0000000
--- a/src/media/audio/win/audio_output_win_unittest.cc
+++ /dev/null
@@ -1,693 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include <windows.h>
-#include <mmsystem.h>
-
-#include "base/basictypes.h"
-#include "base/base_paths.h"
-#include "base/file_util.h"
-#include "base/memory/aligned_memory.h"
-#include "base/path_service.h"
-#include "base/sync_socket.h"
-#include "base/win/scoped_com_initializer.h"
-#include "base/win/windows_version.h"
-#include "media/base/limits.h"
-#include "media/audio/audio_io.h"
-#include "media/audio/audio_util.h"
-#include "media/audio/audio_manager.h"
-#include "media/audio/simple_sources.h"
-#include "testing/gmock/include/gmock/gmock.h"
-#include "testing/gtest/include/gtest/gtest.h"
-
-using ::testing::_;
-using ::testing::AnyNumber;
-using ::testing::DoAll;
-using ::testing::Field;
-using ::testing::Invoke;
-using ::testing::InSequence;
-using ::testing::NiceMock;
-using ::testing::NotNull;
-using ::testing::Return;
-
-using base::win::ScopedCOMInitializer;
-
-namespace media {
-
-static const wchar_t kAudioFile1_16b_m_16K[]
- = L"media\\test\\data\\sweep02_16b_mono_16KHz.raw";
-
-// This class allows us to find out whether the callbacks are occurring as
-// expected and whether any error has been reported.
-class TestSourceBasic : public AudioOutputStream::AudioSourceCallback {
- public:
- explicit TestSourceBasic()
- : callback_count_(0),
- had_error_(0) {
- }
- // AudioSourceCallback::OnMoreData implementation:
- virtual int OnMoreData(AudioBus* audio_bus,
- AudioBuffersState buffers_state) {
- ++callback_count_;
- // Touch the channel memory value to make sure memory is good.
- audio_bus->Zero();
- return audio_bus->frames();
- }
- virtual int OnMoreIOData(AudioBus* source,
- AudioBus* dest,
- AudioBuffersState buffers_state) {
- NOTREACHED();
- return 0;
- }
- // AudioSourceCallback::OnError implementation:
- virtual void OnError(AudioOutputStream* stream, int code) {
- ++had_error_;
- }
- // Returns how many times OnMoreData() has been called.
- int callback_count() const {
- return callback_count_;
- }
- // Returns how many times the OnError callback was called.
- int had_error() const {
- return had_error_;
- }
-
- void set_error(bool error) {
- had_error_ += error ? 1 : 0;
- }
-
- private:
- int callback_count_;
- int had_error_;
-};
-
-const int kMaxNumBuffers = 3;
-// Specializes TestSourceBasic to simulate a source that blocks for some time
-// in the OnMoreData callback.
-class TestSourceLaggy : public TestSourceBasic {
- public:
- TestSourceLaggy(int laggy_after_buffer, int lag_in_ms)
- : laggy_after_buffer_(laggy_after_buffer), lag_in_ms_(lag_in_ms) {
- }
- virtual int OnMoreData(AudioBus* audio_bus,
- AudioBuffersState buffers_state) {
- // Call the base, which increments the callback_count_.
- TestSourceBasic::OnMoreData(audio_bus, buffers_state);
- if (callback_count() > kMaxNumBuffers) {
- ::Sleep(lag_in_ms_);
- }
- return audio_bus->frames();
- }
- private:
- int laggy_after_buffer_;
- int lag_in_ms_;
-};
-
-class MockAudioSource : public AudioOutputStream::AudioSourceCallback {
- public:
- MOCK_METHOD2(OnMoreData, int(AudioBus* audio_bus,
- AudioBuffersState buffers_state));
- MOCK_METHOD3(OnMoreIOData, int(AudioBus* source,
- AudioBus* dest,
- AudioBuffersState buffers_state));
- MOCK_METHOD2(OnError, void(AudioOutputStream* stream, int code));
-
- static int ClearData(AudioBus* audio_bus, AudioBuffersState buffers_state) {
- audio_bus->Zero();
- return audio_bus->frames();
- }
-};
-
-// Helper class to memory map an entire file. The mapping is read-only. Don't
-// use for gigabyte-sized files. Attempts to write to this memory generate
-// memory access violations.
-class ReadOnlyMappedFile {
- public:
- explicit ReadOnlyMappedFile(const wchar_t* file_name)
- : fmap_(NULL), start_(NULL), size_(0) {
- HANDLE file = ::CreateFileW(file_name, GENERIC_READ, FILE_SHARE_READ, NULL,
- OPEN_EXISTING, FILE_ATTRIBUTE_NORMAL, NULL);
- if (INVALID_HANDLE_VALUE == file)
- return;
- fmap_ = ::CreateFileMappingW(file, NULL, PAGE_READONLY, 0, 0, NULL);
- ::CloseHandle(file);
- if (!fmap_)
- return;
- start_ = reinterpret_cast<char*>(::MapViewOfFile(fmap_, FILE_MAP_READ,
- 0, 0, 0));
- if (!start_)
- return;
- MEMORY_BASIC_INFORMATION mbi = {0};
- ::VirtualQuery(start_, &mbi, sizeof(mbi));
- size_ = mbi.RegionSize;
- }
- ~ReadOnlyMappedFile() {
- if (start_) {
- ::UnmapViewOfFile(start_);
- ::CloseHandle(fmap_);
- }
- }
- // Returns true if the file was successfully mapped.
- bool is_valid() const {
- return ((start_ > 0) && (size_ > 0));
- }
- // Returns the size in bytes of the mapped memory.
- uint32 size() const {
- return size_;
- }
- // Returns the memory backing the file.
- const void* GetChunkAt(uint32 offset) {
- return &start_[offset];
- }
-
- private:
- HANDLE fmap_;
- char* start_;
- uint32 size_;
-};
-
-// ===========================================================================
-// Validation of AudioManager::AUDIO_PCM_LINEAR
-//
-// NOTE:
-// The tests can fail on the build bots when somebody connects to them via
-// remote-desktop and the rdp client installs an audio device that fails to open
-// at some point, possibly when the connection goes idle.
-
-// Test that it can be created and closed.
-TEST(WinAudioTest, PCMWaveStreamGetAndClose) {
- scoped_ptr<AudioManager> audio_man(AudioManager::Create());
- if (!audio_man->HasAudioOutputDevices()) {
- LOG(WARNING) << "No output device detected.";
- return;
- }
-
- AudioOutputStream* oas = audio_man->MakeAudioOutputStream(
- AudioParameters(AudioParameters::AUDIO_PCM_LINEAR, CHANNEL_LAYOUT_STEREO,
- 8000, 16, 256));
- ASSERT_TRUE(NULL != oas);
- oas->Close();
-}
-
-// Test that it cannot be created with invalid parameters.
-TEST(WinAudioTest, SanityOnMakeParams) {
- scoped_ptr<AudioManager> audio_man(AudioManager::Create());
- if (!audio_man->HasAudioOutputDevices()) {
- LOG(WARNING) << "No output device detected.";
- return;
- }
-
- AudioParameters::Format fmt = AudioParameters::AUDIO_PCM_LINEAR;
- EXPECT_TRUE(NULL == audio_man->MakeAudioOutputStream(
- AudioParameters(fmt, CHANNEL_LAYOUT_UNSUPPORTED, 8000, 16, 256)));
- EXPECT_TRUE(NULL == audio_man->MakeAudioOutputStream(
- AudioParameters(fmt, CHANNEL_LAYOUT_MONO, 1024 * 1024, 16, 256)));
- EXPECT_TRUE(NULL == audio_man->MakeAudioOutputStream(
- AudioParameters(fmt, CHANNEL_LAYOUT_STEREO, 8000, 80, 256)));
- EXPECT_TRUE(NULL == audio_man->MakeAudioOutputStream(
- AudioParameters(fmt, CHANNEL_LAYOUT_UNSUPPORTED, 8000, 16, 256)));
- EXPECT_TRUE(NULL == audio_man->MakeAudioOutputStream(
- AudioParameters(fmt, CHANNEL_LAYOUT_STEREO, -8000, 16, 256)));
- EXPECT_TRUE(NULL == audio_man->MakeAudioOutputStream(
- AudioParameters(fmt, CHANNEL_LAYOUT_MONO, 8000, 16, -100)));
- EXPECT_TRUE(NULL == audio_man->MakeAudioOutputStream(
- AudioParameters(fmt, CHANNEL_LAYOUT_MONO, 8000, 16, 0)));
- EXPECT_TRUE(NULL == audio_man->MakeAudioOutputStream(
- AudioParameters(fmt, CHANNEL_LAYOUT_MONO, 8000, 16,
- media::limits::kMaxSamplesPerPacket + 1)));
-}
-
-// Test that it can be opened and closed.
-TEST(WinAudioTest, PCMWaveStreamOpenAndClose) {
- scoped_ptr<AudioManager> audio_man(AudioManager::Create());
- if (!audio_man->HasAudioOutputDevices()) {
- LOG(WARNING) << "No output device detected.";
- return;
- }
-
- AudioOutputStream* oas = audio_man->MakeAudioOutputStream(
- AudioParameters(AudioParameters::AUDIO_PCM_LINEAR, CHANNEL_LAYOUT_STEREO,
- 8000, 16, 256));
- ASSERT_TRUE(NULL != oas);
- EXPECT_TRUE(oas->Open());
- oas->Close();
-}
-
-// Test that it has a maximum packet size.
-TEST(WinAudioTest, PCMWaveStreamOpenLimit) {
- scoped_ptr<AudioManager> audio_man(AudioManager::Create());
- if (!audio_man->HasAudioOutputDevices()) {
- LOG(WARNING) << "No output device detected.";
- return;
- }
-
- AudioOutputStream* oas = audio_man->MakeAudioOutputStream(
- AudioParameters(AudioParameters::AUDIO_PCM_LINEAR, CHANNEL_LAYOUT_STEREO,
- 8000, 16, 1024 * 1024 * 1024));
- EXPECT_TRUE(NULL == oas);
- if (oas)
- oas->Close();
-}
-
-// Test potential deadlock situation if the source is slow or blocks for some
-// time. The actual EXPECT_GT checks are mostly meaningless and the real test is that
-// the test completes in reasonable time.
-TEST(WinAudioTest, PCMWaveSlowSource) {
- scoped_ptr<AudioManager> audio_man(AudioManager::Create());
- if (!audio_man->HasAudioOutputDevices()) {
- LOG(WARNING) << "No output device detected.";
- return;
- }
-
- AudioOutputStream* oas = audio_man->MakeAudioOutputStream(
- AudioParameters(AudioParameters::AUDIO_PCM_LINEAR, CHANNEL_LAYOUT_MONO,
- 16000, 16, 256));
- ASSERT_TRUE(NULL != oas);
- TestSourceLaggy test_laggy(2, 90);
- EXPECT_TRUE(oas->Open());
- // The test parameters cause a callback every 32 ms and the source is
- // sleeping for 90 ms, so it is guaranteed that we run out of ready buffers.
- oas->Start(&test_laggy);
- ::Sleep(500);
- EXPECT_GT(test_laggy.callback_count(), 2);
- EXPECT_FALSE(test_laggy.had_error());
- oas->Stop();
- ::Sleep(500);
- oas->Close();
-}
-
-// Test another potential deadlock situation if the thread that calls Start()
-// gets paused. This test is best when run over RDP with audio enabled. See
-// bug 19276 for more details.
-TEST(WinAudioTest, PCMWaveStreamPlaySlowLoop) {
- scoped_ptr<AudioManager> audio_man(AudioManager::Create());
- if (!audio_man->HasAudioOutputDevices()) {
- LOG(WARNING) << "No output device detected.";
- return;
- }
-
- uint32 samples_100_ms = AudioParameters::kAudioCDSampleRate / 10;
- AudioOutputStream* oas = audio_man->MakeAudioOutputStream(
- AudioParameters(AudioParameters::AUDIO_PCM_LINEAR, CHANNEL_LAYOUT_MONO,
- AudioParameters::kAudioCDSampleRate, 16, samples_100_ms));
- ASSERT_TRUE(NULL != oas);
-
- SineWaveAudioSource source(1, 200.0, AudioParameters::kAudioCDSampleRate);
-
- EXPECT_TRUE(oas->Open());
- oas->SetVolume(1.0);
-
- for (int ix = 0; ix != 5; ++ix) {
- oas->Start(&source);
- ::Sleep(10);
- oas->Stop();
- }
- oas->Close();
-}
-
-
-// This test produces actual audio for .5 seconds on the default wave
-// device at 44.1K s/sec. Parameters have been chosen carefully so you should
-// not hear pops or noises while the sound is playing.
-TEST(WinAudioTest, PCMWaveStreamPlay200HzTone44Kss) {
- scoped_ptr<AudioManager> audio_man(AudioManager::Create());
- if (!audio_man->HasAudioOutputDevices()) {
- LOG(WARNING) << "No output device detected.";
- return;
- }
-
- uint32 samples_100_ms = AudioParameters::kAudioCDSampleRate / 10;
- AudioOutputStream* oas = audio_man->MakeAudioOutputStream(
- AudioParameters(AudioParameters::AUDIO_PCM_LINEAR, CHANNEL_LAYOUT_MONO,
- AudioParameters::kAudioCDSampleRate, 16, samples_100_ms));
- ASSERT_TRUE(NULL != oas);
-
- SineWaveAudioSource source(1, 200.0, AudioParameters::kAudioCDSampleRate);
-
- EXPECT_TRUE(oas->Open());
- oas->SetVolume(1.0);
- oas->Start(&source);
- ::Sleep(500);
- oas->Stop();
- oas->Close();
-}
-
-// This test produces actual audio for .5 seconds on the default wave
-// device at 22K s/sec. Parameters have been chosen carefully so you should
-// not hear pops or noises while the sound is playing. The audio should also
-// sound at a lower volume than in PCMWaveStreamPlay200HzTone44Kss.
-TEST(WinAudioTest, PCMWaveStreamPlay200HzTone22Kss) {
- scoped_ptr<AudioManager> audio_man(AudioManager::Create());
- if (!audio_man->HasAudioOutputDevices()) {
- LOG(WARNING) << "No output device detected.";
- return;
- }
-
- uint32 samples_100_ms = AudioParameters::kAudioCDSampleRate / 20;
- AudioOutputStream* oas = audio_man->MakeAudioOutputStream(
- AudioParameters(AudioParameters::AUDIO_PCM_LINEAR, CHANNEL_LAYOUT_MONO,
- AudioParameters::kAudioCDSampleRate / 2, 16,
- samples_100_ms));
- ASSERT_TRUE(NULL != oas);
-
- SineWaveAudioSource source(1, 200.0, AudioParameters::kAudioCDSampleRate/2);
-
- EXPECT_TRUE(oas->Open());
-
- oas->SetVolume(0.5);
- oas->Start(&source);
- ::Sleep(500);
-
- // Test that the volume is within the set limits.
- double volume = 0.0;
- oas->GetVolume(&volume);
- EXPECT_LT(volume, 0.51);
- EXPECT_GT(volume, 0.49);
- oas->Stop();
- oas->Close();
-}
-
-// Uses a restricted source to play ~2 seconds of audio for about 5 seconds. We
-// try hard to generate a situation where the two threads are accessing the
-// object roughly at the same time.
-TEST(WinAudioTest, PushSourceFile16KHz) {
- scoped_ptr<AudioManager> audio_man(AudioManager::Create());
- if (!audio_man->HasAudioOutputDevices()) {
- LOG(WARNING) << "No output device detected.";
- return;
- }
-
- static const int kSampleRate = 16000;
- SineWaveAudioSource source(1, 200.0, kSampleRate);
- // Compute buffer size for 100ms of audio.
- const uint32 kSamples100ms = (kSampleRate / 1000) * 100;
- // Restrict SineWaveAudioSource to 100ms of samples.
- source.CapSamples(kSamples100ms);
-
- AudioOutputStream* oas = audio_man->MakeAudioOutputStream(
- AudioParameters(AudioParameters::AUDIO_PCM_LINEAR, CHANNEL_LAYOUT_MONO,
- kSampleRate, 16, kSamples100ms));
- ASSERT_TRUE(NULL != oas);
-
- EXPECT_TRUE(oas->Open());
-
- oas->SetVolume(1.0);
- oas->Start(&source);
-
- // We buffer and play at the same time; buffering happens every ~10ms and the
- // buffer is consumed every ~100ms. We do 100 iterations, which effectively
- // wraps around the file more than once.
- for (uint32 ix = 0; ix != 100; ++ix) {
- ::Sleep(10);
- source.Reset();
- }
-
- // Play a little bit more of the file.
- ::Sleep(500);
-
- oas->Stop();
- oas->Close();
-}
-
-// This test is to make sure an AudioOutputStream can be started after it was
-// stopped. You will hear two .5 second wave signals separated by 0.5 seconds
-// of silence.
-TEST(WinAudioTest, PCMWaveStreamPlayTwice200HzTone44Kss) {
- scoped_ptr<AudioManager> audio_man(AudioManager::Create());
- if (!audio_man->HasAudioOutputDevices()) {
- LOG(WARNING) << "No output device detected.";
- return;
- }
-
- uint32 samples_100_ms = AudioParameters::kAudioCDSampleRate / 10;
- AudioOutputStream* oas = audio_man->MakeAudioOutputStream(
- AudioParameters(AudioParameters::AUDIO_PCM_LINEAR, CHANNEL_LAYOUT_MONO,
- AudioParameters::kAudioCDSampleRate, 16, samples_100_ms));
- ASSERT_TRUE(NULL != oas);
-
- SineWaveAudioSource source(1, 200.0, AudioParameters::kAudioCDSampleRate);
- EXPECT_TRUE(oas->Open());
- oas->SetVolume(1.0);
-
- // Play the wave for .5 seconds.
- oas->Start(&source);
- ::Sleep(500);
- oas->Stop();
-
- // Sleep to give silence after stopping the AudioOutputStream.
- ::Sleep(250);
-
- // Start again and play for .5 seconds.
- oas->Start(&source);
- ::Sleep(500);
- oas->Stop();
-
- oas->Close();
-}
-
-// With the low latency mode, WASAPI is utilized by default for Vista and
-// higher and Wave is used for XP and lower. It is possible to utilize a
-// smaller buffer size for WASAPI than for Wave.
-TEST(WinAudioTest, PCMWaveStreamPlay200HzToneLowLatency) {
- scoped_ptr<AudioManager> audio_man(AudioManager::Create());
- if (!audio_man->HasAudioOutputDevices()) {
- LOG(WARNING) << "No output device detected.";
- return;
- }
-
- // The WASAPI API requires a correct COM environment.
- ScopedCOMInitializer com_init(ScopedCOMInitializer::kMTA);
-
- // Use 10 ms buffer size for WASAPI and 50 ms buffer size for Wave.
- // Take the existing native sample rate into account.
- int sample_rate = static_cast<int>(media::GetAudioHardwareSampleRate());
- uint32 samples_10_ms = sample_rate / 100;
- int n = (base::win::GetVersion() <= base::win::VERSION_XP) ? 5 : 1;
- AudioOutputStream* oas = audio_man->MakeAudioOutputStream(
- AudioParameters(AudioParameters::AUDIO_PCM_LOW_LATENCY,
- CHANNEL_LAYOUT_MONO, sample_rate,
- 16, n * samples_10_ms));
- ASSERT_TRUE(NULL != oas);
-
- SineWaveAudioSource source(1, 200, sample_rate);
-
- bool opened = oas->Open();
- if (!opened) {
- // It was not possible to open this audio device in mono.
- // No point in continuing the test so let's break here.
- LOG(WARNING) << "Mono is not supported. Skipping test.";
- oas->Close();
- return;
- }
- oas->SetVolume(1.0);
-
- // Play the wave for .8 seconds.
- oas->Start(&source);
- ::Sleep(800);
- oas->Stop();
- oas->Close();
-}
-
-// Check that the pending bytes value is correct when the stream starts.
-TEST(WinAudioTest, PCMWaveStreamPendingBytes) {
- scoped_ptr<AudioManager> audio_man(AudioManager::Create());
- if (!audio_man->HasAudioOutputDevices()) {
- LOG(WARNING) << "No output device detected.";
- return;
- }
-
- uint32 samples_100_ms = AudioParameters::kAudioCDSampleRate / 10;
- AudioOutputStream* oas = audio_man->MakeAudioOutputStream(
- AudioParameters(AudioParameters::AUDIO_PCM_LINEAR, CHANNEL_LAYOUT_MONO,
- AudioParameters::kAudioCDSampleRate, 16, samples_100_ms));
- ASSERT_TRUE(NULL != oas);
-
- NiceMock<MockAudioSource> source;
- EXPECT_TRUE(oas->Open());
-
- uint32 bytes_100_ms = samples_100_ms * 2;
-
- // The audio output stream uses either a double or a triple buffer scheme.
- // We expect the number of pending bytes to reach up to 2 times
- // |bytes_100_ms|, depending on the number of buffers used.
- // From there it decreases as we play the data without providing new data,
- // and then we provide zero data, so the number of pending bytes goes down
- // and eventually reaches zero.
- InSequence s;
-
- EXPECT_CALL(source, OnMoreData(NotNull(),
- Field(&AudioBuffersState::pending_bytes, 0)))
- .WillOnce(Invoke(MockAudioSource::ClearData));
- switch (NumberOfWaveOutBuffers()) {
- case 2:
- break; // Calls are the same as at end of 3-buffer scheme.
- case 3:
- EXPECT_CALL(source, OnMoreData(NotNull(),
- Field(&AudioBuffersState::pending_bytes,
- bytes_100_ms)))
- .WillOnce(Invoke(MockAudioSource::ClearData));
- EXPECT_CALL(source, OnMoreData(NotNull(),
- Field(&AudioBuffersState::pending_bytes,
- 2 * bytes_100_ms)))
- .WillOnce(Invoke(MockAudioSource::ClearData));
- EXPECT_CALL(source, OnMoreData(NotNull(),
- Field(&AudioBuffersState::pending_bytes,
- 2 * bytes_100_ms)))
- .Times(AnyNumber())
- .WillRepeatedly(Return(0));
- break;
- default:
- ASSERT_TRUE(false)
- << "Unexpected number of buffers: " << NumberOfWaveOutBuffers();
- }
- EXPECT_CALL(source, OnMoreData(NotNull(),
- Field(&AudioBuffersState::pending_bytes,
- bytes_100_ms)))
- .Times(AnyNumber())
- .WillRepeatedly(Return(0));
- EXPECT_CALL(source, OnMoreData(NotNull(),
- Field(&AudioBuffersState::pending_bytes, 0)))
- .Times(AnyNumber())
- .WillRepeatedly(Return(0));
-
- oas->Start(&source);
- ::Sleep(500);
- oas->Stop();
- oas->Close();
-}
-
-// Simple source that uses a SyncSocket to retrieve the audio data
-// from a potentially remote thread.
-class SyncSocketSource : public AudioOutputStream::AudioSourceCallback {
- public:
- SyncSocketSource(base::SyncSocket* socket, const AudioParameters& params)
- : socket_(socket) {
- // Setup AudioBus wrapping data we'll receive over the sync socket.
- data_size_ = AudioBus::CalculateMemorySize(params);
- data_.reset(static_cast<float*>(
- base::AlignedAlloc(data_size_, AudioBus::kChannelAlignment)));
- audio_bus_ = AudioBus::WrapMemory(params, data_.get());
- }
- ~SyncSocketSource() {}
-
- // AudioSourceCallback::OnMoreData implementation:
- virtual int OnMoreData(AudioBus* audio_bus,
- AudioBuffersState buffers_state) {
- socket_->Send(&buffers_state, sizeof(buffers_state));
- uint32 size = socket_->Receive(data_.get(), data_size_);
- DCHECK_EQ(static_cast<size_t>(size) % sizeof(*audio_bus_->channel(0)), 0U);
- audio_bus_->CopyTo(audio_bus);
- return audio_bus_->frames();
- }
- virtual int OnMoreIOData(AudioBus* source,
- AudioBus* dest,
- AudioBuffersState buffers_state) {
- NOTREACHED();
- return 0;
- }
- // AudioSourceCallback::OnError implementation:
- virtual void OnError(AudioOutputStream* stream, int code) {
- }
-
- private:
- base::SyncSocket* socket_;
- int data_size_;
- scoped_ptr_malloc<float, base::ScopedPtrAlignedFree> data_;
- scoped_ptr<AudioBus> audio_bus_;
-};
-
-struct SyncThreadContext {
- base::SyncSocket* socket;
- int sample_rate;
- int channels;
- int frames;
- double sine_freq;
- uint32 packet_size_bytes;
-};
-
-// This thread provides the data that the SyncSocketSource above needs
-// using the other end of a SyncSocket. The protocol is as follows:
-//
-// SyncSocketSource ---send 4 bytes ------------> SyncSocketThread
-// <--- audio packet ----------
-//
-DWORD __stdcall SyncSocketThread(void* context) {
- SyncThreadContext& ctx = *(reinterpret_cast<SyncThreadContext*>(context));
-
- // Setup AudioBus wrapping data we'll pass over the sync socket.
- scoped_ptr_malloc<float, base::ScopedPtrAlignedFree> data(static_cast<float*>(
- base::AlignedAlloc(ctx.packet_size_bytes, AudioBus::kChannelAlignment)));
- scoped_ptr<AudioBus> audio_bus = AudioBus::WrapMemory(
- ctx.channels, ctx.frames, data.get());
-
- SineWaveAudioSource sine(1, ctx.sine_freq, ctx.sample_rate);
- const int kTwoSecFrames = ctx.sample_rate * 2;
-
- AudioBuffersState buffers_state;
- int times = 0;
- for (int ix = 0; ix < kTwoSecFrames; ix += ctx.frames) {
- if (ctx.socket->Receive(&buffers_state, sizeof(buffers_state)) == 0)
- break;
- if ((times > 0) && (buffers_state.pending_bytes < 1000)) __debugbreak();
- sine.OnMoreData(audio_bus.get(), buffers_state);
- ctx.socket->Send(data.get(), ctx.packet_size_bytes);
- ++times;
- }
-
- return 0;
-}
-
-// Test the basic operation of AudioOutputStream used with a SyncSocket.
-// The emphasis is to verify that it is possible to feed data to the audio
-// layer using a source based on SyncSocket. In a real situation we would
-// go for the low-latency version in combination with SyncSocket, but to keep
- // the test simpler, AUDIO_PCM_LINEAR is utilized instead. The main
-// principle of the test still remains and we avoid the additional complexity
-// related to the two different audio-layers for AUDIO_PCM_LOW_LATENCY.
-// In this test you should hear a continuous 200Hz tone for 2 seconds.
-TEST(WinAudioTest, SyncSocketBasic) {
- scoped_ptr<AudioManager> audio_man(AudioManager::Create());
- if (!audio_man->HasAudioOutputDevices()) {
- LOG(WARNING) << "No output device detected.";
- return;
- }
-
- static const int sample_rate = AudioParameters::kAudioCDSampleRate;
- static const uint32 kSamples20ms = sample_rate / 50;
- AudioParameters params(AudioParameters::AUDIO_PCM_LINEAR,
- CHANNEL_LAYOUT_MONO, sample_rate, 16, kSamples20ms);
-
-
- AudioOutputStream* oas = audio_man->MakeAudioOutputStream(params);
- ASSERT_TRUE(NULL != oas);
-
- ASSERT_TRUE(oas->Open());
-
- base::SyncSocket sockets[2];
- ASSERT_TRUE(base::SyncSocket::CreatePair(&sockets[0], &sockets[1]));
-
- SyncSocketSource source(&sockets[0], params);
-
- SyncThreadContext thread_context;
- thread_context.sample_rate = params.sample_rate();
- thread_context.sine_freq = 200.0;
- thread_context.packet_size_bytes = AudioBus::CalculateMemorySize(params);
- thread_context.frames = params.frames_per_buffer();
- thread_context.channels = params.channels();
- thread_context.socket = &sockets[1];
-
- HANDLE thread = ::CreateThread(NULL, 0, SyncSocketThread,
- &thread_context, 0, NULL);
-
- oas->Start(&source);
-
- ::WaitForSingleObject(thread, INFINITE);
- ::CloseHandle(thread);
-
- oas->Stop();
- oas->Close();
-}
-
-} // namespace media
diff --git a/src/media/audio/win/audio_unified_win.cc b/src/media/audio/win/audio_unified_win.cc
deleted file mode 100644
index 677f9e0..0000000
--- a/src/media/audio/win/audio_unified_win.cc
+++ /dev/null
@@ -1,569 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "media/audio/win/audio_unified_win.h"
-
-#include <Functiondiscoverykeys_devpkey.h>
-
-#include "base/debug/trace_event.h"
-#include "base/time.h"
-#include "base/win/scoped_com_initializer.h"
-#include "media/audio/win/audio_manager_win.h"
-#include "media/audio/win/avrt_wrapper_win.h"
-#include "media/audio/win/core_audio_util_win.h"
-
-using base::win::ScopedComPtr;
-using base::win::ScopedCOMInitializer;
-using base::win::ScopedCoMem;
-
-// Time in milliseconds between two successive delay measurements.
-// We save resources by not updating the delay estimates for each capture
-// event (typically 100Hz rate).
-static const size_t kTimeDiffInMillisecondsBetweenDelayMeasurements = 1000;
-
-// Compare two sets of audio parameters and return true if they are equal.
-// Note that bits_per_sample() is excluded from this comparison since Core
-// Audio can deal with most bit depths. As an example, if the native/mixing
-// bit depth is 32 bits (default), opening at 16 or 24 still works fine and
-// the audio engine will do the required conversion for us.
-static bool CompareAudioParameters(const media::AudioParameters& a,
- const media::AudioParameters& b) {
- return (a.format() == b.format() &&
- a.channels() == b.channels() &&
- a.sample_rate() == b.sample_rate() &&
- a.frames_per_buffer() == b.frames_per_buffer());
-}
-
-// Use the acquired IAudioClock interface to derive a time stamp of the audio
-// sample which is currently playing through the speakers.
-static double SpeakerStreamPosInMilliseconds(IAudioClock* clock) {
- UINT64 device_frequency = 0, position = 0;
- if (FAILED(clock->GetFrequency(&device_frequency)) ||
- FAILED(clock->GetPosition(&position, NULL))) {
- return 0.0;
- }
-
- return base::Time::kMillisecondsPerSecond *
- (static_cast<double>(position) / device_frequency);
-}
-
-// Get a time stamp in milliseconds given the number of audio frames in
-// |num_frames|, using the current sample rate |fs| as the scale factor.
-// Example: |num_frames| = 960 and |fs| = 48000 => 20 [ms].
-static double CurrentStreamPosInMilliseconds(UINT64 num_frames, DWORD fs) {
- return base::Time::kMillisecondsPerSecond *
- (static_cast<double>(num_frames) / fs);
-}
-
-// Convert a timestamp in milliseconds to byte units given the audio format
-// in |format|.
-// Example: |ts_milliseconds| equals 10, sample rate is 48000 and frame size
-// is 4 bytes per audio frame => 480 * 4 = 1920 [bytes].
-static int MillisecondsToBytes(double ts_milliseconds,
- const WAVEFORMATPCMEX& format) {
- double seconds = ts_milliseconds / base::Time::kMillisecondsPerSecond;
- return static_cast<int>(seconds * format.Format.nSamplesPerSec *
- format.Format.nBlockAlign + 0.5);
-}
-
-namespace media {
-
-WASAPIUnifiedStream::WASAPIUnifiedStream(AudioManagerWin* manager,
- const AudioParameters& params)
- : creating_thread_id_(base::PlatformThread::CurrentId()),
- manager_(manager),
- share_mode_(CoreAudioUtil::GetShareMode()),
- audio_io_thread_(NULL),
- opened_(false),
- endpoint_render_buffer_size_frames_(0),
- endpoint_capture_buffer_size_frames_(0),
- num_written_frames_(0),
- total_delay_ms_(0.0),
- source_(NULL),
- capture_bus_(AudioBus::Create(params)),
- render_bus_(AudioBus::Create(params)) {
- DCHECK(manager_);
-
- DVLOG_IF(1, !HasUnifiedDefaultIO()) << "Unified audio I/O is not supported.";
- DVLOG_IF(1, share_mode_ == AUDCLNT_SHAREMODE_EXCLUSIVE)
- << "Core Audio (WASAPI) EXCLUSIVE MODE is enabled.";
-
-#if !defined(NDEBUG)
- // Add log message if input parameters are not identical to the preferred
- // parameters.
- AudioParameters mix_params;
- HRESULT hr = CoreAudioUtil::GetPreferredAudioParameters(
- eRender, eConsole, &mix_params);
- DVLOG_IF(1, SUCCEEDED(hr) && !CompareAudioParameters(params, mix_params)) <<
- "Input and preferred parameters are not identical.";
-#endif
-
- // Load the Avrt DLL if not already loaded. Required to support MMCSS.
- bool avrt_init = avrt::Initialize();
- DCHECK(avrt_init) << "Failed to load the avrt.dll";
-
- // Begin with the WAVEFORMATEX structure that specifies the basic format.
- WAVEFORMATEX* format = &format_.Format;
- format->wFormatTag = WAVE_FORMAT_EXTENSIBLE;
- format->nChannels = params.channels();
- format->nSamplesPerSec = params.sample_rate();
- format->wBitsPerSample = params.bits_per_sample();
- format->nBlockAlign = (format->wBitsPerSample / 8) * format->nChannels;
- format->nAvgBytesPerSec = format->nSamplesPerSec * format->nBlockAlign;
- format->cbSize = sizeof(WAVEFORMATEXTENSIBLE) - sizeof(WAVEFORMATEX);
-
- // Add the parts which are unique to WAVE_FORMAT_EXTENSIBLE.
- format_.Samples.wValidBitsPerSample = params.bits_per_sample();
- format_.dwChannelMask = KSAUDIO_SPEAKER_STEREO;
- format_.SubFormat = KSDATAFORMAT_SUBTYPE_PCM;
-
- // Store size (in different units) of audio packets which we expect to
- // get from the audio endpoint device in each render event.
- packet_size_frames_ = params.GetBytesPerBuffer() / format->nBlockAlign;
- float packet_size_ms = (1000.0 * packet_size_frames_) / params.sample_rate();
- DVLOG(1) << "Number of bytes per audio frame : " << format->nBlockAlign;
- DVLOG(1) << "Number of audio frames per packet: " << packet_size_frames_;
- DVLOG(1) << "Number of milliseconds per packet: " << packet_size_ms;
-
- // All events are auto-reset events and non-signaled initially.
-
- // Create the event which the audio engine will signal each time a buffer
- // has been recorded.
- capture_event_.Set(CreateEvent(NULL, FALSE, FALSE, NULL));
-
- // Create the event which will be set in Stop() when streaming shall stop.
- stop_streaming_event_.Set(CreateEvent(NULL, FALSE, FALSE, NULL));
-}
-
-WASAPIUnifiedStream::~WASAPIUnifiedStream() {
-}
-
-bool WASAPIUnifiedStream::Open() {
- DCHECK_EQ(GetCurrentThreadId(), creating_thread_id_);
- if (opened_)
- return true;
-
- if (!HasUnifiedDefaultIO()) {
- LOG(ERROR) << "Unified audio I/O is not supported.";
- return false;
- }
-
- // Render side:
-
- ScopedComPtr<IAudioClient> audio_output_client =
- CoreAudioUtil::CreateDefaultClient(eRender, eConsole);
- if (!audio_output_client)
- return false;
-
- if (!CoreAudioUtil::IsFormatSupported(audio_output_client,
- share_mode_,
- &format_)) {
- return false;
- }
-
- HRESULT hr = S_FALSE;
- if (share_mode_ == AUDCLNT_SHAREMODE_SHARED) {
- hr = CoreAudioUtil::SharedModeInitialize(
- audio_output_client, &format_, NULL,
- &endpoint_render_buffer_size_frames_);
- } else {
- // TODO(henrika): add support for AUDCLNT_SHAREMODE_EXCLUSIVE.
- }
- if (FAILED(hr))
- return false;
-
- ScopedComPtr<IAudioRenderClient> audio_render_client =
- CoreAudioUtil::CreateRenderClient(audio_output_client);
- if (!audio_render_client)
- return false;
-
- // Capture side:
-
- ScopedComPtr<IAudioClient> audio_input_client =
- CoreAudioUtil::CreateDefaultClient(eCapture, eConsole);
- if (!audio_input_client)
- return false;
-
- if (!CoreAudioUtil::IsFormatSupported(audio_input_client,
- share_mode_,
- &format_)) {
- return false;
- }
-
- if (share_mode_ == AUDCLNT_SHAREMODE_SHARED) {
- // Include valid event handle for event-driven initialization.
- hr = CoreAudioUtil::SharedModeInitialize(
- audio_input_client, &format_, capture_event_.Get(),
- &endpoint_capture_buffer_size_frames_);
- } else {
- // TODO(henrika): add support for AUDCLNT_SHAREMODE_EXCLUSIVE.
- }
- if (FAILED(hr))
- return false;
-
- ScopedComPtr<IAudioCaptureClient> audio_capture_client =
- CoreAudioUtil::CreateCaptureClient(audio_input_client);
- if (!audio_capture_client)
- return false;
-
- // Store all valid COM interfaces.
- audio_output_client_ = audio_output_client;
- audio_render_client_ = audio_render_client;
- audio_input_client_ = audio_input_client;
- audio_capture_client_ = audio_capture_client;
-
- opened_ = true;
- return SUCCEEDED(hr);
-}
-
-void WASAPIUnifiedStream::Start(AudioSourceCallback* callback) {
- DCHECK_EQ(GetCurrentThreadId(), creating_thread_id_);
- CHECK(callback);
- CHECK(opened_);
-
- if (audio_io_thread_.get()) {
- CHECK_EQ(callback, source_);
- return;
- }
-
- source_ = callback;
-
- // Create and start the thread that will drive capturing and rendering.
- audio_io_thread_.reset(
- new base::DelegateSimpleThread(this, "wasapi_io_thread"));
- audio_io_thread_->Start();
- if (!audio_io_thread_->HasBeenStarted()) {
- DLOG(ERROR) << "Failed to start WASAPI IO thread.";
- return;
- }
-
- // Start streaming input data between the endpoint buffer and the audio
- // engine.
- HRESULT hr = audio_input_client_->Start();
- if (FAILED(hr)) {
- StopAndJoinThread(hr);
- return;
- }
-
- // Reset the counter for number of rendered frames taking into account the
- // fact that we always initialize the render side with silence.
- UINT32 num_queued_frames = 0;
- audio_output_client_->GetCurrentPadding(&num_queued_frames);
- DCHECK_EQ(num_queued_frames, endpoint_render_buffer_size_frames_);
- num_written_frames_ = num_queued_frames;
-
- // Start streaming output data between the endpoint buffer and the audio
- // engine.
- hr = audio_output_client_->Start();
- if (FAILED(hr)) {
- StopAndJoinThread(hr);
- return;
- }
-}
-
-void WASAPIUnifiedStream::Stop() {
- DCHECK_EQ(GetCurrentThreadId(), creating_thread_id_);
- if (!audio_io_thread_.get())
- return;
-
- // Stop input audio streaming.
- HRESULT hr = audio_input_client_->Stop();
- if (FAILED(hr)) {
- DLOG_IF(ERROR, hr != AUDCLNT_E_NOT_INITIALIZED)
- << "Failed to stop input streaming: " << std::hex << hr;
- }
-
- // Stop output audio streaming.
- hr = audio_output_client_->Stop();
- if (FAILED(hr)) {
- DLOG_IF(ERROR, hr != AUDCLNT_E_NOT_INITIALIZED)
- << "Failed to stop output streaming: " << std::hex << hr;
- }
-
- // Wait until the thread completes and perform cleanup.
- SetEvent(stop_streaming_event_.Get());
- audio_io_thread_->Join();
- audio_io_thread_.reset();
-
- // Ensure that we don't quit the main thread loop immediately next
- // time Start() is called.
- ResetEvent(stop_streaming_event_.Get());
-
- // Clear source callback, it'll be set again on the next Start() call.
- source_ = NULL;
-
- // Flush all pending data and reset the audio clock stream position to 0.
- hr = audio_output_client_->Reset();
- if (FAILED(hr)) {
- DLOG_IF(ERROR, hr != AUDCLNT_E_NOT_INITIALIZED)
- << "Failed to reset output streaming: " << std::hex << hr;
- }
-
- hr = audio_input_client_->Reset();
- if (FAILED(hr)) {
- DLOG_IF(ERROR, hr != AUDCLNT_E_NOT_INITIALIZED)
- << "Failed to reset input streaming: " << std::hex << hr;
- }
-
- // Extra safety check to ensure that the buffers are cleared.
- // If the buffers are not cleared correctly, the next call to Start()
- // would fail with AUDCLNT_E_BUFFER_ERROR at IAudioRenderClient::GetBuffer().
- // TODO(henrika): this check is only needed for shared-mode streams.
- UINT32 num_queued_frames = 0;
- audio_output_client_->GetCurrentPadding(&num_queued_frames);
- DCHECK_EQ(0u, num_queued_frames);
-}
-
-void WASAPIUnifiedStream::Close() {
- DCHECK_EQ(GetCurrentThreadId(), creating_thread_id_);
-
- // It is valid to call Close() before calling Open() or Start().
- // It is also valid to call Close() after Start() has been called.
- Stop();
-
- // Inform the audio manager that we have been closed. This will cause our
- // destruction.
- manager_->ReleaseOutputStream(this);
-}
-
-void WASAPIUnifiedStream::SetVolume(double volume) {
- NOTIMPLEMENTED();
-}
-
-void WASAPIUnifiedStream::GetVolume(double* volume) {
- NOTIMPLEMENTED();
-}
-
-// static
-bool WASAPIUnifiedStream::HasUnifiedDefaultIO() {
- AudioParameters in_params;
- HRESULT hr = CoreAudioUtil::GetPreferredAudioParameters(eCapture, eConsole,
- &in_params);
- if (FAILED(hr))
- return false;
-
- AudioParameters out_params;
- hr = CoreAudioUtil::GetPreferredAudioParameters(eRender, eConsole,
- &out_params);
- if (FAILED(hr))
- return false;
-
- return CompareAudioParameters(in_params, out_params);
-}
-
-void WASAPIUnifiedStream::Run() {
- ScopedCOMInitializer com_init(ScopedCOMInitializer::kMTA);
-
- // Increase the thread priority.
- audio_io_thread_->SetThreadPriority(base::kThreadPriority_RealtimeAudio);
-
- // Enable MMCSS to ensure that this thread receives prioritized access to
- // CPU resources.
- // TODO(henrika): investigate if it is possible to include these additional
- // settings in SetThreadPriority() as well.
- DWORD task_index = 0;
- HANDLE mm_task = avrt::AvSetMmThreadCharacteristics(L"Pro Audio",
- &task_index);
- bool mmcss_is_ok =
- (mm_task && avrt::AvSetMmThreadPriority(mm_task, AVRT_PRIORITY_CRITICAL));
- if (!mmcss_is_ok) {
- // Failed to enable MMCSS on this thread. It is not fatal but can lead
- // to reduced QoS at high load.
- DWORD err = GetLastError();
- LOG(WARNING) << "Failed to enable MMCSS (error code=" << err << ").";
- }
-
- // The IAudioClock interface enables us to monitor a stream's data
- // rate and the current position in the stream. Allocate it before we
- // start spinning.
- ScopedComPtr<IAudioClock> audio_output_clock;
- HRESULT hr = audio_output_client_->GetService(
- __uuidof(IAudioClock), audio_output_clock.ReceiveVoid());
- LOG_IF(WARNING, FAILED(hr)) << "Failed to create IAudioClock: "
- << std::hex << hr;
-
- // Stores a delay measurement (unit is in bytes). This variable is not
- // updated at each event, but the update frequency is set by a constant
- // called |kTimeDiffInMillisecondsBetweenDelayMeasurements|.
- int total_delay_bytes = 0;
-
- bool streaming = true;
- bool error = false;
- HANDLE wait_array[] = { stop_streaming_event_,
- capture_event_ };
-
- const int bytes_per_sample = format_.Format.wBitsPerSample >> 3;
-
- // Keep streaming audio until the stop, or error, event is signaled.
- // The current implementation uses capture events as the driving mechanism since
- // extensive testing has shown that it gives us a more reliable callback
- // sequence compared with a scheme where both capture and render events are
- // utilized.
- while (streaming && !error) {
- // Wait for a close-down event, or a new capture event.
- DWORD wait_result = WaitForMultipleObjects(arraysize(wait_array),
- wait_array,
- FALSE,
- INFINITE);
- switch (wait_result) {
- case WAIT_OBJECT_0 + 0:
- // |stop_streaming_event_| has been set.
- streaming = false;
- break;
- case WAIT_OBJECT_0 + 1:
- // |capture_event_| has been set
- {
- TRACE_EVENT0("audio", "WASAPIUnifiedStream::Run");
-
- // --- Capture ---
-
- BYTE* data_ptr = NULL;
- UINT32 num_captured_frames = 0;
- DWORD flags = 0;
- UINT64 device_position = 0;
- UINT64 capture_time_stamp = 0;
-
- base::TimeTicks now_tick = base::TimeTicks::HighResNow();
-
- // Retrieve the amount of data in the capture endpoint buffer.
- // |capture_time_stamp| is the value of the performance
- // counter at the time that the audio endpoint device recorded
- // the device position of the first audio frame in the data packet.
- hr = audio_capture_client_->GetBuffer(&data_ptr,
- &num_captured_frames,
- &flags,
- &device_position,
- &capture_time_stamp);
- if (FAILED(hr)) {
- DLOG(ERROR) << "Failed to get data from the capture buffer";
- continue;
- }
-
- if (num_captured_frames != 0) {
- if (flags & AUDCLNT_BUFFERFLAGS_SILENT) {
- // Clear out the capture buffer since silence is reported.
- capture_bus_->Zero();
- } else {
- // Store captured data in an audio bus after de-interleaving
- // the data to match the audio bus structure.
- capture_bus_->FromInterleaved(
- data_ptr, num_captured_frames, bytes_per_sample);
- }
- }
-
- hr = audio_capture_client_->ReleaseBuffer(num_captured_frames);
- DLOG_IF(ERROR, FAILED(hr)) << "Failed to release capture buffer";
-
- // Save resources by not asking for new delay estimates each time.
- // These estimates are fairly stable and it is perfectly safe to only
- // sample at a rate of ~1Hz.
- // TODO(henrika): it might be possible to use a fixed delay instead.
- if ((now_tick - last_delay_sample_time_).InMilliseconds() >
- kTimeDiffInMillisecondsBetweenDelayMeasurements) {
- // Calculate the estimated capture delay, i.e., the latency between
- // the recording time and the time when we are notified about
- // the recorded data. Note that the capture time stamp is given in
- // 100-nanosecond (0.1 microseconds) units.
- base::TimeDelta diff = now_tick -
- base::TimeTicks::FromInternalValue(0.1 * capture_time_stamp);
- const double capture_delay_ms = diff.InMillisecondsF();
-
- // Calculate the estimated render delay, i.e., the time difference
- // between the time when data is added to the endpoint buffer and
- // when the data is played out on the actual speaker.
- const double stream_pos = CurrentStreamPosInMilliseconds(
- num_written_frames_ + packet_size_frames_,
- format_.Format.nSamplesPerSec);
- const double speaker_pos =
- SpeakerStreamPosInMilliseconds(audio_output_clock);
- const double render_delay_ms = stream_pos - speaker_pos;
-
- // Derive the total delay, i.e., the sum of the input and output
- // delays. Also convert the value into byte units.
- total_delay_ms_ = capture_delay_ms + render_delay_ms;
- last_delay_sample_time_ = now_tick;
- DVLOG(3) << "total_delay_ms : " << total_delay_ms_;
- total_delay_bytes = MillisecondsToBytes(total_delay_ms_, format_);
- }
-
- // Prepare for rendering by calling OnMoreIOData().
- int frames_filled = source_->OnMoreIOData(
- capture_bus_.get(),
- render_bus_.get(),
- AudioBuffersState(0, total_delay_bytes));
- DCHECK_EQ(frames_filled, render_bus_->frames());
-
- // --- Render ---
-
- // Keep track of number of rendered frames since we need it for
- // our delay calculations.
- num_written_frames_ += frames_filled;
-
- // Derive the amount of available space in the endpoint buffer.
- // Avoid render attempt if there is no room for a captured packet.
- UINT32 num_queued_frames = 0;
- audio_output_client_->GetCurrentPadding(&num_queued_frames);
- if (endpoint_render_buffer_size_frames_ - num_queued_frames <
- packet_size_frames_)
- continue;
-
- // Grab all available space in the rendering endpoint buffer
- // into which the client can write a data packet.
- uint8* audio_data = NULL;
- hr = audio_render_client_->GetBuffer(packet_size_frames_,
- &audio_data);
- if (FAILED(hr)) {
- DLOG(ERROR) << "Failed to access render buffer";
- continue;
- }
-
- // Convert the audio bus content to interleaved integer data using
- // |audio_data| as destination.
- render_bus_->ToInterleaved(
- packet_size_frames_, bytes_per_sample, audio_data);
-
- // Release the buffer space acquired in the GetBuffer() call.
- audio_render_client_->ReleaseBuffer(packet_size_frames_, 0);
- DLOG_IF(ERROR, FAILED(hr)) << "Failed to release render buffer";
- }
- break;
- default:
- error = true;
- break;
- }
- }
-
- if (streaming && error) {
- // Stop audio streaming since something has gone wrong in our main thread
- // loop. Note that we are still in a "started" state, hence a Stop() call
- // is required to join the thread properly.
- audio_input_client_->Stop();
- audio_output_client_->Stop();
- PLOG(ERROR) << "WASAPI streaming failed.";
- }
-
- // Disable MMCSS.
- if (mm_task && !avrt::AvRevertMmThreadCharacteristics(mm_task)) {
- PLOG(WARNING) << "Failed to disable MMCSS";
- }
-}
-
-void WASAPIUnifiedStream::HandleError(HRESULT err) {
- CHECK((started() && GetCurrentThreadId() == audio_io_thread_->tid()) ||
- (!started() && GetCurrentThreadId() == creating_thread_id_));
- NOTREACHED() << "Error code: " << std::hex << err;
- if (source_)
- source_->OnError(this, static_cast<int>(err));
-}
-
-void WASAPIUnifiedStream::StopAndJoinThread(HRESULT err) {
- CHECK(GetCurrentThreadId() == creating_thread_id_);
- DCHECK(audio_io_thread_.get());
- SetEvent(stop_streaming_event_.Get());
- audio_io_thread_->Join();
- audio_io_thread_.reset();
- HandleError(err);
-}
-
-} // namespace media
diff --git a/src/media/audio/win/audio_unified_win.h b/src/media/audio/win/audio_unified_win.h
deleted file mode 100644
index 0e8e829..0000000
--- a/src/media/audio/win/audio_unified_win.h
+++ /dev/null
@@ -1,179 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef MEDIA_AUDIO_WIN_AUDIO_UNIFIED_WIN_H_
-#define MEDIA_AUDIO_WIN_AUDIO_UNIFIED_WIN_H_
-
-#include <Audioclient.h>
-#include <MMDeviceAPI.h>
-
-#include <string>
-
-#include "base/compiler_specific.h"
-#include "base/gtest_prod_util.h"
-#include "base/threading/platform_thread.h"
-#include "base/threading/simple_thread.h"
-#include "base/win/scoped_co_mem.h"
-#include "base/win/scoped_comptr.h"
-#include "base/win/scoped_handle.h"
-#include "media/audio/audio_io.h"
-#include "media/audio/audio_parameters.h"
-#include "media/base/media_export.h"
-
-namespace media {
-
-class AudioManagerWin;
-
-// Implementation of AudioOutputStream for Windows using the Core Audio API
-// where both capturing and rendering take place on the same thread to enable
-// audio I/O.
-//
-// The user should also ensure that audio I/O is supported by calling
-// HasUnifiedDefaultIO().
-//
-// Implementation notes:
-//
-// - Certain conditions must be fulfilled to support audio I/O:
-// o Both capture and render side must use the same sample rate.
-// o Both capture and render side must use the same channel count.
-// o Both capture and render side must use the same channel configuration.
-// o See HasUnifiedDefaultIO() for more details.
-//
-// TODO(henrika):
-//
-// - Add support for exclusive mode.
-// - Add multi-channel support.
-// - Add support for non-matching sample rates.
-//
-class MEDIA_EXPORT WASAPIUnifiedStream
- : public AudioOutputStream,
- public base::DelegateSimpleThread::Delegate {
- public:
-  // The ctor takes all the usual parameters, plus |manager|, which is the
-  // audio manager that is creating this object.
- WASAPIUnifiedStream(AudioManagerWin* manager,
- const AudioParameters& params);
-
- // The dtor is typically called by the AudioManager only and it is usually
- // triggered by calling AudioOutputStream::Close().
- virtual ~WASAPIUnifiedStream();
-
- // Implementation of AudioOutputStream.
- virtual bool Open() OVERRIDE;
- virtual void Start(AudioSourceCallback* callback) OVERRIDE;
- virtual void Stop() OVERRIDE;
- virtual void Close() OVERRIDE;
- virtual void SetVolume(double volume) OVERRIDE;
- virtual void GetVolume(double* volume) OVERRIDE;
-
- // Returns true if all conditions to support audio IO are fulfilled.
- // Input and output sides of the Audio Engine must use the same native
- // device period (requires e.g. identical sample rates) and have the same
- // channel count.
- static bool HasUnifiedDefaultIO();
-
- bool started() const {
- return audio_io_thread_.get() != NULL;
- }
-
- private:
- // DelegateSimpleThread::Delegate implementation.
- virtual void Run() OVERRIDE;
-
- // Issues the OnError() callback to the |source_|.
- void HandleError(HRESULT err);
-
- // Stops and joins the audio thread in case of an error.
- void StopAndJoinThread(HRESULT err);
-
- // Converts unique endpoint ID to user-friendly device name.
- std::string GetDeviceName(LPCWSTR device_id) const;
-
- // Returns the number of channels the audio engine uses for its internal
- // processing/mixing of shared-mode streams for the default endpoint device.
- int endpoint_channel_count() { return format_.Format.nChannels; }
-
- // Contains the thread ID of the creating thread.
- base::PlatformThreadId creating_thread_id_;
-
- // Our creator, the audio manager needs to be notified when we close.
- AudioManagerWin* manager_;
-
- // The sharing mode for the streams.
- // Valid values are AUDCLNT_SHAREMODE_SHARED and AUDCLNT_SHAREMODE_EXCLUSIVE
- // where AUDCLNT_SHAREMODE_SHARED is the default.
- AUDCLNT_SHAREMODE share_mode_;
-
-  // Rendering and capturing are driven by this thread (no message loop).
- // All OnMoreIOData() callbacks will be called from this thread.
- scoped_ptr<base::DelegateSimpleThread> audio_io_thread_;
-
- // Contains the desired audio format which is set up at construction.
- // Extended PCM waveform format structure based on WAVEFORMATEXTENSIBLE.
- // Use this for multiple channel and hi-resolution PCM data.
- WAVEFORMATPCMEX format_;
-
- // True when successfully opened.
- bool opened_;
-
- // Size in bytes of each audio frame (4 bytes for 16-bit stereo PCM).
- size_t frame_size_;
-
- // Size in audio frames of each audio packet where an audio packet
- // is defined as the block of data which the source is expected to deliver
- // in each OnMoreIOData() callback.
- size_t packet_size_frames_;
-
- // Length of the audio endpoint buffer.
- size_t endpoint_render_buffer_size_frames_;
- size_t endpoint_capture_buffer_size_frames_;
-
- // Counts the number of audio frames written to the endpoint buffer.
- uint64 num_written_frames_;
-
- // Time stamp for last delay measurement.
- base::TimeTicks last_delay_sample_time_;
-
- // Contains the total (sum of render and capture) delay in milliseconds.
- double total_delay_ms_;
-
- // Pointer to the client that will deliver audio samples to be played out.
- AudioSourceCallback* source_;
-
-  // IMMDevice interfaces which represent audio endpoint devices.
- base::win::ScopedComPtr<IMMDevice> endpoint_render_device_;
- base::win::ScopedComPtr<IMMDevice> endpoint_capture_device_;
-
-  // IAudioClient interfaces which enable a client to create and initialize
- // an audio stream between an audio application and the audio engine.
- base::win::ScopedComPtr<IAudioClient> audio_output_client_;
- base::win::ScopedComPtr<IAudioClient> audio_input_client_;
-
-  // The IAudioRenderClient interface enables a client to write output
- // data to a rendering endpoint buffer.
- base::win::ScopedComPtr<IAudioRenderClient> audio_render_client_;
-
-  // The IAudioCaptureClient interface enables a client to read input
- // data from a capturing endpoint buffer.
- base::win::ScopedComPtr<IAudioCaptureClient> audio_capture_client_;
-
- // The audio engine will signal this event each time a buffer has been
- // recorded.
- base::win::ScopedHandle capture_event_;
-
- // This event will be signaled when streaming shall stop.
- base::win::ScopedHandle stop_streaming_event_;
-
- // Container for retrieving data from AudioSourceCallback::OnMoreIOData().
- scoped_ptr<AudioBus> render_bus_;
-
- // Container for sending data to AudioSourceCallback::OnMoreIOData().
- scoped_ptr<AudioBus> capture_bus_;
-
- DISALLOW_COPY_AND_ASSIGN(WASAPIUnifiedStream);
-};
-
-} // namespace media
-
-#endif // MEDIA_AUDIO_WIN_AUDIO_UNIFIED_WIN_H_
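As a usage reference for the interface declared above: a minimal sketch of driving the stream through the audio manager, assuming a hypothetical PassThroughSource callback and a COM-initialized (MTA) calling thread. It mirrors the Open()/Start()/Stop()/Close() sequence exercised by the unit tests below and is illustrative only.

// Hypothetical callback that echoes captured audio back to the render side.
class PassThroughSource : public media::AudioOutputStream::AudioSourceCallback {
 public:
  virtual int OnMoreData(media::AudioBus* dest,
                         media::AudioBuffersState buffers_state) { return 0; }
  virtual int OnMoreIOData(media::AudioBus* source, media::AudioBus* dest,
                           media::AudioBuffersState buffers_state) {
    source->CopyTo(dest);     // Loop the captured buffer back to the speakers.
    return source->frames();
  }
  virtual void OnError(media::AudioOutputStream* stream, int code) {}
};

void RunUnifiedLoopback(media::AudioManager* audio_manager,
                        const media::AudioParameters& params) {
  if (!media::WASAPIUnifiedStream::HasUnifiedDefaultIO())
    return;                                  // Unified I/O is not supported.
  media::AudioOutputStream* stream =
      audio_manager->MakeAudioOutputStream(params);
  if (!stream)
    return;
  if (!stream->Open()) {
    stream->Close();                         // Close() releases the stream.
    return;
  }
  PassThroughSource source;
  stream->Start(&source);                    // Callbacks run on the audio I/O thread.
  // ... stream audio for a while ...
  stream->Stop();
  stream->Close();                           // Close() also destroys the stream.
}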
diff --git a/src/media/audio/win/audio_unified_win_unittest.cc b/src/media/audio/win/audio_unified_win_unittest.cc
deleted file mode 100644
index 4d5f41b..0000000
--- a/src/media/audio/win/audio_unified_win_unittest.cc
+++ /dev/null
@@ -1,288 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/basictypes.h"
-#include "base/command_line.h"
-#include "base/file_util.h"
-#include "base/message_loop.h"
-#include "base/path_service.h"
-#include "base/test/test_timeouts.h"
-#include "base/time.h"
-#include "base/win/scoped_com_initializer.h"
-#include "media/audio/audio_io.h"
-#include "media/audio/audio_manager.h"
-#include "media/audio/audio_util.h"
-#include "media/audio/win/audio_unified_win.h"
-#include "media/audio/win/core_audio_util_win.h"
-#include "media/base/media_switches.h"
-#include "testing/gmock/include/gmock/gmock.h"
-#include "testing/gtest/include/gtest/gtest.h"
-
-using ::testing::_;
-using ::testing::AtLeast;
-using ::testing::Between;
-using ::testing::DoAll;
-using ::testing::NotNull;
-using ::testing::Return;
-using base::win::ScopedCOMInitializer;
-
-namespace media {
-
-static const size_t kMaxDeltaSamples = 1000;
-static const char* kDeltaTimeMsFileName = "unified_delta_times_ms.txt";
-
-// Verify that the delay estimate in the OnMoreIOData() callback is larger
-// than an expected minimum value.
-MATCHER_P(DelayGreaterThan, value, "") {
- return (arg.hardware_delay_bytes > value.hardware_delay_bytes);
-}
-
-// Used to terminate a loop from a different thread than the loop belongs to.
-// |loop| should be a MessageLoopProxy.
-ACTION_P(QuitLoop, loop) {
- loop->PostTask(FROM_HERE, MessageLoop::QuitClosure());
-}
-
-class MockUnifiedSourceCallback
- : public AudioOutputStream::AudioSourceCallback {
- public:
- MOCK_METHOD2(OnMoreData, int(AudioBus* audio_bus,
- AudioBuffersState buffers_state));
- MOCK_METHOD3(OnMoreIOData, int(AudioBus* source,
- AudioBus* dest,
- AudioBuffersState buffers_state));
- MOCK_METHOD2(OnError, void(AudioOutputStream* stream, int code));
-};
-
-// AudioOutputStream::AudioSourceCallback implementation which enables audio
-// play-through. It also creates a text file that contains times between two
-// successive callbacks. Units are in milliseconds. This file can be used for
-// off-line analysis of the callback sequence.
-class UnifiedSourceCallback : public AudioOutputStream::AudioSourceCallback {
- public:
- explicit UnifiedSourceCallback()
- : previous_call_time_(base::Time::Now()),
- text_file_(NULL),
- elements_to_write_(0) {
- delta_times_.reset(new int[kMaxDeltaSamples]);
- }
-
- virtual ~UnifiedSourceCallback() {
- FilePath file_name;
- EXPECT_TRUE(PathService::Get(base::DIR_EXE, &file_name));
- file_name = file_name.AppendASCII(kDeltaTimeMsFileName);
-
- EXPECT_TRUE(!text_file_);
- text_file_ = file_util::OpenFile(file_name, "wt");
- DLOG_IF(ERROR, !text_file_) << "Failed to open log file.";
- LOG(INFO) << ">> Output file " << file_name.value() << " has been created.";
-
- // Write the array which contains delta times to a text file.
- size_t elements_written = 0;
- while (elements_written < elements_to_write_) {
- fprintf(text_file_, "%d\n", delta_times_[elements_written]);
- ++elements_written;
- }
- file_util::CloseFile(text_file_);
- }
-
- virtual int OnMoreData(AudioBus* dest,
- AudioBuffersState buffers_state) {
- NOTREACHED();
- return 0;
- };
-
- virtual int OnMoreIOData(AudioBus* source,
- AudioBus* dest,
- AudioBuffersState buffers_state) {
- // Store time between this callback and the previous callback.
- int diff = (base::Time::Now() - previous_call_time_).InMilliseconds();
- previous_call_time_ = base::Time::Now();
- if (elements_to_write_ < kMaxDeltaSamples) {
- delta_times_[elements_to_write_] = diff;
- ++elements_to_write_;
- }
-
-    // Play out the recorded audio samples in loopback.
- source->CopyTo(dest);
- return source->frames();
- };
-
- virtual void OnError(AudioOutputStream* stream, int code) {
- NOTREACHED();
- }
-
- private:
- base::Time previous_call_time_;
- scoped_array<int> delta_times_;
- FILE* text_file_;
- size_t elements_to_write_;
-};
-
-// Convenience method which ensures that we fulfill all required conditions
-// to run unified audio tests on Windows.
-static bool CanRunUnifiedAudioTests(AudioManager* audio_man) {
- const CommandLine* cmd_line = CommandLine::ForCurrentProcess();
- if (!cmd_line->HasSwitch(switches::kEnableWebAudioInput)) {
- DVLOG(1) << "--enable-webaudio-input must be defined to run this test.";
- return false;
- }
-
- if (!CoreAudioUtil::IsSupported()) {
- LOG(WARNING) << "This tests requires Windows Vista or higher.";
- return false;
- }
-
- if (!audio_man->HasAudioOutputDevices()) {
- LOG(WARNING) << "No output devices detected.";
- return false;
- }
-
- if (!audio_man->HasAudioInputDevices()) {
- LOG(WARNING) << "No input devices detected.";
- return false;
- }
-
- if (!WASAPIUnifiedStream::HasUnifiedDefaultIO()) {
- LOG(WARNING) << "Audio IO is not supported.";
- return false;
- }
-
- return true;
-}
-
-// Convenience class which simplifies creation of a unified AudioOutputStream
-// object.
-class AudioUnifiedStreamWrapper {
- public:
- explicit AudioUnifiedStreamWrapper(AudioManager* audio_manager)
- : com_init_(ScopedCOMInitializer::kMTA),
- audio_man_(audio_manager) {
-    // We open up both sides (input and output) using the preferred
-    // set of audio parameters. These parameters correspond to the mix format
- // that the audio engine uses internally for processing of shared-mode
- // output streams.
- EXPECT_TRUE(SUCCEEDED(CoreAudioUtil::GetPreferredAudioParameters(
-        eRender, eConsole, &params_)));
- }
-
- ~AudioUnifiedStreamWrapper() {}
-
- // Creates AudioOutputStream object using default parameters.
- WASAPIUnifiedStream* Create() {
- return static_cast<WASAPIUnifiedStream*> (CreateOutputStream());
- }
-
- AudioParameters::Format format() const { return params_.format(); }
- int channels() const { return params_.channels(); }
- int bits_per_sample() const { return params_.bits_per_sample(); }
- int sample_rate() const { return params_.sample_rate(); }
- int frames_per_buffer() const { return params_.frames_per_buffer(); }
- int bytes_per_buffer() const { return params_.GetBytesPerBuffer(); }
-
- private:
- AudioOutputStream* CreateOutputStream() {
- AudioOutputStream* aos = audio_man_->MakeAudioOutputStream(params_);
- EXPECT_TRUE(aos);
- return aos;
- }
-
- ScopedCOMInitializer com_init_;
- AudioManager* audio_man_;
- AudioParameters params_;
-};
-
-// Convenience method which creates a default WASAPIUnifiedStream object.
-static WASAPIUnifiedStream* CreateDefaultUnifiedStream(
- AudioManager* audio_manager) {
- AudioUnifiedStreamWrapper aosw(audio_manager);
- return aosw.Create();
-}
-
-// Test Open(), Close() calling sequence.
-TEST(WASAPIUnifiedStreamTest, OpenAndClose) {
- scoped_ptr<AudioManager> audio_manager(AudioManager::Create());
- if (!CanRunUnifiedAudioTests(audio_manager.get()))
- return;
-
- WASAPIUnifiedStream* wus = CreateDefaultUnifiedStream(audio_manager.get());
- EXPECT_TRUE(wus->Open());
- wus->Close();
-}
-
-// Test Open(), Start(), Close() calling sequence.
-TEST(WASAPIUnifiedStreamTest, OpenStartAndClose) {
- scoped_ptr<AudioManager> audio_manager(AudioManager::Create());
- if (!CanRunUnifiedAudioTests(audio_manager.get()))
- return;
-
- MockUnifiedSourceCallback source;
- AudioUnifiedStreamWrapper ausw(audio_manager.get());
- WASAPIUnifiedStream* wus = ausw.Create();
-
- EXPECT_TRUE(wus->Open());
- EXPECT_CALL(source, OnError(wus, _))
- .Times(0);
- EXPECT_CALL(source, OnMoreIOData(NotNull(), NotNull(), _))
- .Times(Between(0, 1))
- .WillOnce(Return(ausw.frames_per_buffer()));
- wus->Start(&source);
- wus->Close();
-}
-
-// Verify that IO callbacks starts as they should.
-TEST(WASAPIUnifiedStreamTest, StartLoopbackAudio) {
- scoped_ptr<AudioManager> audio_manager(AudioManager::Create());
- if (!CanRunUnifiedAudioTests(audio_manager.get()))
- return;
-
- MessageLoopForUI loop;
- MockUnifiedSourceCallback source;
- AudioUnifiedStreamWrapper ausw(audio_manager.get());
- WASAPIUnifiedStream* wus = ausw.Create();
-
-  // Set up expected minimum delay estimation where we use a minimum delay
- // which is equal to the sum of render and capture sizes. We can never
- // reach a delay lower than this value.
- AudioBuffersState min_total_audio_delay(0, 2 * ausw.bytes_per_buffer());
-
- EXPECT_TRUE(wus->Open());
- EXPECT_CALL(source, OnError(wus, _))
- .Times(0);
- EXPECT_CALL(source, OnMoreIOData(
- NotNull(), NotNull(), DelayGreaterThan(min_total_audio_delay)))
- .Times(AtLeast(2))
- .WillOnce(Return(ausw.frames_per_buffer()))
- .WillOnce(DoAll(
- QuitLoop(loop.message_loop_proxy()),
- Return(ausw.frames_per_buffer())));
- wus->Start(&source);
- loop.PostDelayedTask(FROM_HERE, MessageLoop::QuitClosure(),
- TestTimeouts::action_timeout());
- loop.Run();
- wus->Stop();
- wus->Close();
-}
-
-// Perform a real-time test in loopback where the recorded audio is echoed
-// back to the speaker. This test allows the user to verify that the audio
-// sounds OK. A text file with name |kDeltaTimeMsFileName| is also generated.
-TEST(WASAPIUnifiedStreamTest, DISABLED_RealTimePlayThrough) {
- scoped_ptr<AudioManager> audio_manager(AudioManager::Create());
- if (!CanRunUnifiedAudioTests(audio_manager.get()))
- return;
-
- MessageLoopForUI loop;
- UnifiedSourceCallback source;
- WASAPIUnifiedStream* wus = CreateDefaultUnifiedStream(audio_manager.get());
-
- EXPECT_TRUE(wus->Open());
- wus->Start(&source);
- loop.PostDelayedTask(FROM_HERE, MessageLoop::QuitClosure(),
- base::TimeDelta::FromMilliseconds(10000));
- loop.Run();
- wus->Close();
-}
-
-} // namespace media
diff --git a/src/media/audio/win/avrt_wrapper_win.cc b/src/media/audio/win/avrt_wrapper_win.cc
deleted file mode 100644
index c9f1599..0000000
--- a/src/media/audio/win/avrt_wrapper_win.cc
+++ /dev/null
@@ -1,64 +0,0 @@
-// Copyright (c) 2011 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "media/audio/win/avrt_wrapper_win.h"
-
-#include "base/logging.h"
-
-namespace avrt {
-
-// Function pointers
-typedef BOOL (WINAPI *AvRevertMmThreadCharacteristicsFn)(HANDLE);
-typedef HANDLE (WINAPI *AvSetMmThreadCharacteristicsFn)(LPCWSTR, LPDWORD);
-typedef BOOL (WINAPI *AvSetMmThreadPriorityFn)(HANDLE, AVRT_PRIORITY);
-
-HMODULE g_avrt = NULL;
-AvRevertMmThreadCharacteristicsFn g_revert_mm_thread_characteristics = NULL;
-AvSetMmThreadCharacteristicsFn g_set_mm_thread_characteristics = NULL;
-AvSetMmThreadPriorityFn g_set_mm_thread_priority = NULL;
-
-bool Initialize() {
- if (!g_set_mm_thread_priority) {
- // The avrt.dll is available on Windows Vista and later.
- wchar_t path[MAX_PATH] = {0};
- ExpandEnvironmentStrings(L"%WINDIR%\\system32\\avrt.dll", path,
- arraysize(path));
- g_avrt = LoadLibraryExW(path, NULL, LOAD_WITH_ALTERED_SEARCH_PATH);
- if (!g_avrt)
- return false;
-
- g_revert_mm_thread_characteristics =
- reinterpret_cast<AvRevertMmThreadCharacteristicsFn>(
- GetProcAddress(g_avrt, "AvRevertMmThreadCharacteristics"));
- g_set_mm_thread_characteristics =
- reinterpret_cast<AvSetMmThreadCharacteristicsFn>(
- GetProcAddress(g_avrt, "AvSetMmThreadCharacteristicsW"));
- g_set_mm_thread_priority = reinterpret_cast<AvSetMmThreadPriorityFn>(
- GetProcAddress(g_avrt, "AvSetMmThreadPriority"));
- }
-
- return (g_avrt && g_revert_mm_thread_characteristics &&
- g_set_mm_thread_characteristics && g_set_mm_thread_priority);
-}
-
-bool AvRevertMmThreadCharacteristics(HANDLE avrt_handle) {
- DCHECK(g_revert_mm_thread_characteristics);
- return (g_revert_mm_thread_characteristics &&
- g_revert_mm_thread_characteristics(avrt_handle));
-}
-
-HANDLE AvSetMmThreadCharacteristics(const wchar_t* task_name,
- DWORD* task_index) {
- DCHECK(g_set_mm_thread_characteristics);
- return (g_set_mm_thread_characteristics ?
- g_set_mm_thread_characteristics(task_name, task_index) : NULL);
-}
-
-bool AvSetMmThreadPriority(HANDLE avrt_handle, AVRT_PRIORITY priority) {
- DCHECK(g_set_mm_thread_priority);
- return (g_set_mm_thread_priority &&
- g_set_mm_thread_priority(avrt_handle, priority));
-}
-
-} // namespace avrt
diff --git a/src/media/audio/win/avrt_wrapper_win.h b/src/media/audio/win/avrt_wrapper_win.h
deleted file mode 100644
index 8127b6b..0000000
--- a/src/media/audio/win/avrt_wrapper_win.h
+++ /dev/null
@@ -1,39 +0,0 @@
-// Copyright (c) 2011 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-//
-// The avrt namespace encapsulates the details needed to support MMCSS.
-//
-// The Multimedia Class Scheduler service (MMCSS) enables multimedia
-// applications to ensure that their time-sensitive processing receives
-// prioritized access to CPU resources. This service enables multimedia
-// applications to utilize as much of the CPU as possible without denying
-// CPU resources to lower-priority applications.
-// MMCSS requires Windows Vista or higher and that the Avrt DLL is loaded.
-//
-// TODO(henrika): refactor and merge into existing thread implementation
-// for Windows to ensure that MMCSS can be enabled for all threads.
-//
-#ifndef MEDIA_AUDIO_WIN_AVRT_WRAPPER_WIN_H_
-#define MEDIA_AUDIO_WIN_AVRT_WRAPPER_WIN_H_
-
-#include <windows.h>
-#include <avrt.h>
-
-#include "base/basictypes.h"
-
-namespace avrt {
-
-// Loads the Avrt.dll which is available on Windows Vista and later.
-bool Initialize();
-
-// Function wrappers for the underlying MMCSS functions.
-bool AvRevertMmThreadCharacteristics(HANDLE avrt_handle);
-HANDLE AvSetMmThreadCharacteristics(const wchar_t* task_name,
- DWORD* task_index);
-bool AvSetMmThreadPriority(HANDLE avrt_handle, AVRT_PRIORITY priority);
-
-} // namespace avrt
-
-#endif // MEDIA_AUDIO_WIN_AVRT_WRAPPER_WIN_H_
-
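For context, the streaming thread in the removed WASAPI code wraps its render/capture loop in the enable/revert pattern that this wrapper exists for. A minimal sketch follows, assuming the "Pro Audio" MMCSS task class and AVRT_PRIORITY_CRITICAL commonly requested by the Chromium audio threads:

// Sketch: boost the audio thread via MMCSS for the duration of the loop.
void AudioThreadLoop() {
  HANDLE mm_task = NULL;
  if (avrt::Initialize()) {
    DWORD task_index = 0;
    // Ask MMCSS to schedule this thread in the "Pro Audio" class.
    mm_task = avrt::AvSetMmThreadCharacteristics(L"Pro Audio", &task_index);
    if (mm_task)
      avrt::AvSetMmThreadPriority(mm_task, AVRT_PRIORITY_CRITICAL);
  }

  // ... event-driven render/capture loop runs here ...

  // Give the MMCSS boost back when streaming stops.
  if (mm_task && !avrt::AvRevertMmThreadCharacteristics(mm_task))
    PLOG(WARNING) << "Failed to disable MMCSS";
}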
diff --git a/src/media/audio/win/core_audio_util_win.cc b/src/media/audio/win/core_audio_util_win.cc
deleted file mode 100644
index b501cbf..0000000
--- a/src/media/audio/win/core_audio_util_win.cc
+++ /dev/null
@@ -1,580 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "media/audio/win/core_audio_util_win.h"
-
-#include <Audioclient.h>
-#include <Functiondiscoverykeys_devpkey.h>
-
-#include "base/command_line.h"
-#include "base/logging.h"
-#include "base/stringprintf.h"
-#include "base/utf_string_conversions.h"
-#include "base/win/scoped_co_mem.h"
-#include "base/win/scoped_handle.h"
-#include "base/win/windows_version.h"
-#include "media/base/media_switches.h"
-
-using base::win::ScopedCoMem;
-using base::win::ScopedHandle;
-
-namespace media {
-
-typedef uint32 ChannelConfig;
-
-// Converts Microsoft's channel configuration to ChannelLayout.
-// This mapping is not perfect but the best we can do given the current
-// ChannelLayout enumerator and the Windows-specific speaker configurations
-// defined in ksmedia.h. Don't assume that the channel ordering in
-// ChannelLayout is exactly the same as the Windows specific configuration.
-// As an example: KSAUDIO_SPEAKER_7POINT1_SURROUND is mapped to
-                                                                   &params)));
-// speakers are different in these two definitions.
-static ChannelLayout ChannelConfigToChannelLayout(ChannelConfig config) {
- switch (config) {
- case KSAUDIO_SPEAKER_DIRECTOUT:
- DVLOG(2) << "KSAUDIO_SPEAKER_DIRECTOUT=>CHANNEL_LAYOUT_NONE";
- return CHANNEL_LAYOUT_NONE;
- case KSAUDIO_SPEAKER_MONO:
- DVLOG(2) << "KSAUDIO_SPEAKER_MONO=>CHANNEL_LAYOUT_MONO";
- return CHANNEL_LAYOUT_MONO;
- case KSAUDIO_SPEAKER_STEREO:
- DVLOG(2) << "KSAUDIO_SPEAKER_STEREO=>CHANNEL_LAYOUT_STEREO";
- return CHANNEL_LAYOUT_STEREO;
- case KSAUDIO_SPEAKER_QUAD:
- DVLOG(2) << "KSAUDIO_SPEAKER_QUAD=>CHANNEL_LAYOUT_QUAD";
- return CHANNEL_LAYOUT_QUAD;
- case KSAUDIO_SPEAKER_SURROUND:
- DVLOG(2) << "KSAUDIO_SPEAKER_SURROUND=>CHANNEL_LAYOUT_4_0";
- return CHANNEL_LAYOUT_4_0;
- case KSAUDIO_SPEAKER_5POINT1:
- DVLOG(2) << "KSAUDIO_SPEAKER_5POINT1=>CHANNEL_LAYOUT_5_1_BACK";
- return CHANNEL_LAYOUT_5_1_BACK;
- case KSAUDIO_SPEAKER_5POINT1_SURROUND:
- DVLOG(2) << "KSAUDIO_SPEAKER_5POINT1_SURROUND=>CHANNEL_LAYOUT_5_1";
- return CHANNEL_LAYOUT_5_1;
- case KSAUDIO_SPEAKER_7POINT1:
- DVLOG(2) << "KSAUDIO_SPEAKER_7POINT1=>CHANNEL_LAYOUT_7_1_WIDE";
- return CHANNEL_LAYOUT_7_1_WIDE;
- case KSAUDIO_SPEAKER_7POINT1_SURROUND:
- DVLOG(2) << "KSAUDIO_SPEAKER_7POINT1_SURROUND=>CHANNEL_LAYOUT_7_1";
- return CHANNEL_LAYOUT_7_1;
- default:
- DVLOG(2) << "Unsupported channel layout: " << config;
- return CHANNEL_LAYOUT_UNSUPPORTED;
- }
-}
-
-// Scoped PROPVARIANT class for automatically freeing a COM PROPVARIANT
-// structure at the end of a scope.
-class ScopedPropertyVariant {
- public:
- ScopedPropertyVariant() {
- PropVariantInit(&propvar_);
- }
- ~ScopedPropertyVariant() {
- PropVariantClear(&propvar_);
- }
-
- // Retrieves the pointer address.
- // Used to receive a PROPVARIANT as an out argument (and take ownership).
- PROPVARIANT* Receive() {
- DCHECK_EQ(propvar_.vt, VT_EMPTY);
- return &propvar_;
- }
-
- VARTYPE type() const {
- return propvar_.vt;
- }
-
- LPWSTR as_wide_string() const {
- DCHECK_EQ(type(), VT_LPWSTR);
- return propvar_.pwszVal;
- }
-
- private:
- PROPVARIANT propvar_;
-
- DISALLOW_COPY_AND_ASSIGN(ScopedPropertyVariant);
-};
-
-bool CoreAudioUtil::IsSupported() {
- // Microsoft does not plan to make the Core Audio APIs available for use
- // with earlier versions of Windows, including Microsoft Windows Server 2003,
- // Windows XP, Windows Millennium Edition, Windows 2000, and Windows 98.
- return (base::win::GetVersion() >= base::win::VERSION_VISTA);
-}
-
-base::TimeDelta CoreAudioUtil::RefererenceTimeToTimeDelta(REFERENCE_TIME time) {
- // Each unit of reference time is 100 nanoseconds <=> 0.1 microsecond.
- return base::TimeDelta::FromMicroseconds(0.1 * time + 0.5);
-}
-
-AUDCLNT_SHAREMODE CoreAudioUtil::GetShareMode() {
- const CommandLine* cmd_line = CommandLine::ForCurrentProcess();
- if (cmd_line->HasSwitch(switches::kEnableExclusiveAudio))
- return AUDCLNT_SHAREMODE_EXCLUSIVE;
- return AUDCLNT_SHAREMODE_SHARED;
-}
-
-int CoreAudioUtil::NumberOfActiveDevices(EDataFlow data_flow) {
- DCHECK(CoreAudioUtil::IsSupported());
- // Create the IMMDeviceEnumerator interface.
- ScopedComPtr<IMMDeviceEnumerator> device_enumerator =
- CreateDeviceEnumerator();
- if (!device_enumerator)
- return 0;
-
- // Generate a collection of active (present and not disabled) audio endpoint
- // devices for the specified data-flow direction.
- // This method will succeed even if all devices are disabled.
- ScopedComPtr<IMMDeviceCollection> collection;
- HRESULT hr = device_enumerator->EnumAudioEndpoints(data_flow,
- DEVICE_STATE_ACTIVE,
- collection.Receive());
- if (FAILED(hr)) {
- LOG(ERROR) << "IMMDeviceCollection::EnumAudioEndpoints: " << std::hex << hr;
- return 0;
- }
-
- // Retrieve the number of active audio devices for the specified direction
- UINT number_of_active_devices = 0;
- collection->GetCount(&number_of_active_devices);
- DVLOG(2) << ((data_flow == eCapture) ? "[in ] " : "[out] ")
- << "number of devices: " << number_of_active_devices;
- return static_cast<int>(number_of_active_devices);
-}
-
-ScopedComPtr<IMMDeviceEnumerator> CoreAudioUtil::CreateDeviceEnumerator() {
- DCHECK(CoreAudioUtil::IsSupported());
- ScopedComPtr<IMMDeviceEnumerator> device_enumerator;
- HRESULT hr = CoCreateInstance(__uuidof(MMDeviceEnumerator),
- NULL,
- CLSCTX_INPROC_SERVER,
- __uuidof(IMMDeviceEnumerator),
- device_enumerator.ReceiveVoid());
- // CO_E_NOTINITIALIZED is the most likely reason for failure and if that
- // happens we might as well die here.
- CHECK(SUCCEEDED(hr));
- return device_enumerator;
-}
-
-ScopedComPtr<IMMDevice> CoreAudioUtil::CreateDefaultDevice(EDataFlow data_flow,
- ERole role) {
- DCHECK(CoreAudioUtil::IsSupported());
- ScopedComPtr<IMMDevice> endpoint_device;
-
- // Create the IMMDeviceEnumerator interface.
- ScopedComPtr<IMMDeviceEnumerator> device_enumerator =
- CreateDeviceEnumerator();
- if (!device_enumerator)
- return endpoint_device;
-
- // Retrieve the default audio endpoint for the specified data-flow
- // direction and role.
- HRESULT hr = device_enumerator->GetDefaultAudioEndpoint(
- data_flow, role, endpoint_device.Receive());
-
- if (FAILED(hr)) {
- DVLOG(1) << "IMMDeviceEnumerator::GetDefaultAudioEndpoint: "
- << std::hex << hr;
- return endpoint_device;
- }
-
- // Verify that the audio endpoint device is active, i.e., that the audio
- // adapter that connects to the endpoint device is present and enabled.
- DWORD state = DEVICE_STATE_DISABLED;
- hr = endpoint_device->GetState(&state);
- if (SUCCEEDED(hr)) {
- if (!(state & DEVICE_STATE_ACTIVE)) {
- DVLOG(1) << "Selected endpoint device is not active";
- endpoint_device.Release();
- }
- }
- return endpoint_device;
-}
-
-ScopedComPtr<IMMDevice> CoreAudioUtil::CreateDevice(
- const std::string& device_id) {
- DCHECK(CoreAudioUtil::IsSupported());
- ScopedComPtr<IMMDevice> endpoint_device;
-
- // Create the IMMDeviceEnumerator interface.
- ScopedComPtr<IMMDeviceEnumerator> device_enumerator =
- CreateDeviceEnumerator();
- if (!device_enumerator)
- return endpoint_device;
-
- // Retrieve an audio device specified by an endpoint device-identification
- // string.
- HRESULT hr = device_enumerator->GetDevice(UTF8ToUTF16(device_id).c_str(),
- endpoint_device.Receive());
- DVLOG_IF(1, FAILED(hr)) << "IMMDeviceEnumerator::GetDevice: "
- << std::hex << hr;
- return endpoint_device;
-}
-
-HRESULT CoreAudioUtil::GetDeviceName(IMMDevice* device, AudioDeviceName* name) {
- DCHECK(CoreAudioUtil::IsSupported());
-
- // Retrieve unique name of endpoint device.
- // Example: "{0.0.1.00000000}.{8db6020f-18e3-4f25-b6f5-7726c9122574}".
- AudioDeviceName device_name;
- ScopedCoMem<WCHAR> endpoint_device_id;
- HRESULT hr = device->GetId(&endpoint_device_id);
- if (FAILED(hr))
- return hr;
- WideToUTF8(endpoint_device_id, wcslen(endpoint_device_id),
- &device_name.unique_id);
-
- // Retrieve user-friendly name of endpoint device.
- // Example: "Microphone (Realtek High Definition Audio)".
- ScopedComPtr<IPropertyStore> properties;
- hr = device->OpenPropertyStore(STGM_READ, properties.Receive());
- if (FAILED(hr))
- return hr;
- ScopedPropertyVariant friendly_name;
- hr = properties->GetValue(PKEY_Device_FriendlyName, friendly_name.Receive());
- if (FAILED(hr))
- return hr;
- if (friendly_name.as_wide_string()) {
- WideToUTF8(friendly_name.as_wide_string(),
- wcslen(friendly_name.as_wide_string()),
- &device_name.device_name);
- }
-
- *name = device_name;
- DVLOG(2) << "friendly name: " << device_name.device_name;
- DVLOG(2) << "unique id : " << device_name.unique_id;
- return hr;
-}
-
-std::string CoreAudioUtil::GetFriendlyName(const std::string& device_id) {
- DCHECK(CoreAudioUtil::IsSupported());
- ScopedComPtr<IMMDevice> audio_device = CreateDevice(device_id);
- if (!audio_device)
- return std::string();
-
- AudioDeviceName device_name;
- HRESULT hr = GetDeviceName(audio_device, &device_name);
- if (FAILED(hr))
- return std::string();
-
- return device_name.device_name;
-}
-
-bool CoreAudioUtil::DeviceIsDefault(EDataFlow flow,
- ERole role,
- std::string device_id) {
- DCHECK(CoreAudioUtil::IsSupported());
- ScopedComPtr<IMMDevice> device = CreateDefaultDevice(flow, role);
- if (!device)
- return false;
-
- ScopedCoMem<WCHAR> default_device_id;
- HRESULT hr = device->GetId(&default_device_id);
- if (FAILED(hr))
- return false;
-
- std::string str_default;
- WideToUTF8(default_device_id, wcslen(default_device_id), &str_default);
- if (device_id.compare(str_default) != 0)
- return false;
- return true;
-}
-
-EDataFlow CoreAudioUtil::GetDataFlow(IMMDevice* device) {
- DCHECK(CoreAudioUtil::IsSupported());
- ScopedComPtr<IMMEndpoint> endpoint;
- HRESULT hr = device->QueryInterface(endpoint.Receive());
- if (FAILED(hr)) {
- DVLOG(1) << "IMMDevice::QueryInterface: " << std::hex << hr;
- return eAll;
- }
-
- EDataFlow data_flow;
- hr = endpoint->GetDataFlow(&data_flow);
- if (FAILED(hr)) {
- DVLOG(1) << "IMMEndpoint::GetDataFlow: " << std::hex << hr;
- return eAll;
- }
- return data_flow;
-}
-
-ScopedComPtr<IAudioClient> CoreAudioUtil::CreateClient(
- IMMDevice* audio_device) {
- DCHECK(CoreAudioUtil::IsSupported());
-
- // Creates and activates an IAudioClient COM object given the selected
- // endpoint device.
- ScopedComPtr<IAudioClient> audio_client;
- HRESULT hr = audio_device->Activate(__uuidof(IAudioClient),
- CLSCTX_INPROC_SERVER,
- NULL,
- audio_client.ReceiveVoid());
- DVLOG_IF(1, FAILED(hr)) << "IMMDevice::Activate: " << std::hex << hr;
- return audio_client;
-}
-
-ScopedComPtr<IAudioClient> CoreAudioUtil::CreateDefaultClient(
- EDataFlow data_flow, ERole role) {
- DCHECK(CoreAudioUtil::IsSupported());
- ScopedComPtr<IMMDevice> default_device(CreateDefaultDevice(data_flow, role));
- return (default_device ? CreateClient(default_device) :
- ScopedComPtr<IAudioClient>());
-}
-
-HRESULT CoreAudioUtil::GetSharedModeMixFormat(
- IAudioClient* client, WAVEFORMATPCMEX* format) {
- DCHECK(CoreAudioUtil::IsSupported());
- ScopedCoMem<WAVEFORMATPCMEX> format_pcmex;
- HRESULT hr = client->GetMixFormat(
- reinterpret_cast<WAVEFORMATEX**>(&format_pcmex));
- if (FAILED(hr))
- return hr;
-
- size_t bytes = sizeof(WAVEFORMATEX) + format_pcmex->Format.cbSize;
- DCHECK_EQ(bytes, sizeof(WAVEFORMATPCMEX));
-
- memcpy(format, format_pcmex, bytes);
-
- DVLOG(2) << "wFormatTag: 0x" << std::hex << format->Format.wFormatTag
- << ", nChannels: " << std::dec << format->Format.nChannels
- << ", nSamplesPerSec: " << format->Format.nSamplesPerSec
- << ", nAvgBytesPerSec: " << format->Format.nAvgBytesPerSec
- << ", nBlockAlign: " << format->Format.nBlockAlign
- << ", wBitsPerSample: " << format->Format.wBitsPerSample
- << ", cbSize: " << format->Format.cbSize
- << ", wValidBitsPerSample: " << format->Samples.wValidBitsPerSample
- << ", dwChannelMask: 0x" << std::hex << format->dwChannelMask;
-
- return hr;
-}
-
-bool CoreAudioUtil::IsFormatSupported(IAudioClient* client,
- AUDCLNT_SHAREMODE share_mode,
- const WAVEFORMATPCMEX* format) {
- DCHECK(CoreAudioUtil::IsSupported());
- ScopedCoMem<WAVEFORMATEXTENSIBLE> closest_match;
- HRESULT hr = client->IsFormatSupported(
- share_mode, reinterpret_cast<const WAVEFORMATEX*>(format),
- reinterpret_cast<WAVEFORMATEX**>(&closest_match));
-
- // This log can only be triggered for shared mode.
- DLOG_IF(ERROR, hr == S_FALSE) << "Format is not supported "
- << "but a closest match exists.";
- // This log can be triggered both for shared and exclusive modes.
- DLOG_IF(ERROR, hr == AUDCLNT_E_UNSUPPORTED_FORMAT) << "Unsupported format.";
- if (hr == S_FALSE) {
- DVLOG(2) << "wFormatTag: " << closest_match->Format.wFormatTag
- << ", nChannels: " << closest_match->Format.nChannels
- << ", nSamplesPerSec: " << closest_match->Format.nSamplesPerSec
- << ", wBitsPerSample: " << closest_match->Format.wBitsPerSample;
- }
-
- return (hr == S_OK);
-}
-
-HRESULT CoreAudioUtil::GetDevicePeriod(IAudioClient* client,
- AUDCLNT_SHAREMODE share_mode,
- REFERENCE_TIME* device_period) {
- DCHECK(CoreAudioUtil::IsSupported());
-
- // Get the period of the engine thread.
- REFERENCE_TIME default_period = 0;
- REFERENCE_TIME minimum_period = 0;
- HRESULT hr = client->GetDevicePeriod(&default_period, &minimum_period);
- if (FAILED(hr))
- return hr;
-
- *device_period = (share_mode == AUDCLNT_SHAREMODE_SHARED) ? default_period :
- minimum_period;
- DVLOG(2) << "device_period: "
- << RefererenceTimeToTimeDelta(*device_period).InMillisecondsF()
- << " [ms]";
- return hr;
-}
-
-HRESULT CoreAudioUtil::GetPreferredAudioParameters(
- IAudioClient* client, AudioParameters* params) {
- DCHECK(CoreAudioUtil::IsSupported());
- WAVEFORMATPCMEX format;
- HRESULT hr = GetSharedModeMixFormat(client, &format);
- if (FAILED(hr))
- return hr;
-
- REFERENCE_TIME default_period = 0;
- hr = GetDevicePeriod(client, AUDCLNT_SHAREMODE_SHARED, &default_period);
- if (FAILED(hr))
- return hr;
-
- // Get the integer mask which corresponds to the channel layout the
- // audio engine uses for its internal processing/mixing of shared-mode
- // streams. This mask indicates which channels are present in the multi-
- // channel stream. The least significant bit corresponds with the Front Left
- // speaker, the next least significant bit corresponds to the Front Right
- // speaker, and so on, continuing in the order defined in KsMedia.h.
- // See http://msdn.microsoft.com/en-us/library/windows/hardware/ff537083.aspx
- // for more details.
- ChannelConfig channel_config = format.dwChannelMask;
-
-  // Convert Microsoft's channel configuration to generic ChannelLayout.
- ChannelLayout channel_layout = ChannelConfigToChannelLayout(channel_config);
-
- // Store preferred sample rate and buffer size.
- int sample_rate = format.Format.nSamplesPerSec;
- int frames_per_buffer = static_cast<int>(sample_rate *
- RefererenceTimeToTimeDelta(default_period).InSecondsF() + 0.5);
-
- // TODO(henrika): possibly use format.Format.wBitsPerSample here instead.
- // We use a hard-coded value of 16 bits per sample today even if most audio
-  // engines do the actual mixing in 32 bits per sample.
- int bits_per_sample = 16;
-
- DVLOG(2) << "channel_layout : " << channel_layout;
- DVLOG(2) << "sample_rate : " << sample_rate;
- DVLOG(2) << "bits_per_sample : " << bits_per_sample;
- DVLOG(2) << "frames_per_buffer: " << frames_per_buffer;
-
- AudioParameters audio_params(AudioParameters::AUDIO_PCM_LOW_LATENCY,
- channel_layout,
- sample_rate,
- bits_per_sample,
- frames_per_buffer);
-
- *params = audio_params;
- return hr;
-}
-
-HRESULT CoreAudioUtil::GetPreferredAudioParameters(
- EDataFlow data_flow, ERole role, AudioParameters* params) {
- DCHECK(CoreAudioUtil::IsSupported());
-
- ScopedComPtr<IAudioClient> client = CreateDefaultClient(data_flow, role);
- if (!client) {
- // Map NULL-pointer to new error code which can be different from the
- // actual error code. The exact value is not important here.
- return AUDCLNT_E_ENDPOINT_CREATE_FAILED;
- }
- return GetPreferredAudioParameters(client, params);
-}
-
-HRESULT CoreAudioUtil::SharedModeInitialize(IAudioClient* client,
- const WAVEFORMATPCMEX* format,
- HANDLE event_handle,
- size_t* endpoint_buffer_size) {
- DCHECK(CoreAudioUtil::IsSupported());
-
- DWORD stream_flags = AUDCLNT_STREAMFLAGS_NOPERSIST;
-
- // Enable event-driven streaming if a valid event handle is provided.
- // After the stream starts, the audio engine will signal the event handle
- // to notify the client each time a buffer becomes ready to process.
- // Event-driven buffering is supported for both rendering and capturing.
- // Both shared-mode and exclusive-mode streams can use event-driven buffering.
- bool use_event = (event_handle != NULL &&
- event_handle != INVALID_HANDLE_VALUE);
- if (use_event)
- stream_flags |= AUDCLNT_STREAMFLAGS_EVENTCALLBACK;
- DVLOG(2) << "stream_flags: 0x" << std::hex << stream_flags;
-
- // Initialize the shared mode client for minimal delay.
- HRESULT hr = client->Initialize(AUDCLNT_SHAREMODE_SHARED,
- stream_flags,
- 0,
- 0,
- reinterpret_cast<const WAVEFORMATEX*>(format),
- NULL);
- if (FAILED(hr)) {
- DVLOG(1) << "IAudioClient::Initialize: " << std::hex << hr;
- return hr;
- }
-
- if (use_event) {
- hr = client->SetEventHandle(event_handle);
- if (FAILED(hr)) {
- DVLOG(1) << "IAudioClient::SetEventHandle: " << std::hex << hr;
- return hr;
- }
- }
-
- UINT32 buffer_size_in_frames = 0;
- hr = client->GetBufferSize(&buffer_size_in_frames);
- if (FAILED(hr)) {
- DVLOG(1) << "IAudioClient::GetBufferSize: " << std::hex << hr;
- return hr;
- }
-
- *endpoint_buffer_size = static_cast<size_t>(buffer_size_in_frames);
- DVLOG(2) << "endpoint buffer size: " << buffer_size_in_frames;
-
- // TODO(henrika): utilize when delay measurements are added.
- REFERENCE_TIME latency = 0;
- hr = client->GetStreamLatency(&latency);
- DVLOG(2) << "stream latency: "
- << RefererenceTimeToTimeDelta(latency).InMillisecondsF() << " [ms]";
- return hr;
-}
-
-ScopedComPtr<IAudioRenderClient> CoreAudioUtil::CreateRenderClient(
- IAudioClient* client) {
- DCHECK(CoreAudioUtil::IsSupported());
-
- // Get access to the IAudioRenderClient interface. This interface
- // enables us to write output data to a rendering endpoint buffer.
- ScopedComPtr<IAudioRenderClient> audio_render_client;
- HRESULT hr = client->GetService(__uuidof(IAudioRenderClient),
- audio_render_client.ReceiveVoid());
- if (FAILED(hr)) {
- DVLOG(1) << "IAudioClient::GetService: " << std::hex << hr;
- return ScopedComPtr<IAudioRenderClient>();
- }
-
- // TODO(henrika): verify that this scheme is the same for shared mode and
- // exclusive mode streams.
-
- // Avoid start-up glitches by filling up the endpoint buffer with "silence"
- // before starting the stream.
- UINT32 endpoint_buffer_size = 0;
- hr = client->GetBufferSize(&endpoint_buffer_size);
- DVLOG_IF(1, FAILED(hr)) << "IAudioClient::GetBufferSize: " << std::hex << hr;
-
- BYTE* data = NULL;
- hr = audio_render_client->GetBuffer(endpoint_buffer_size, &data);
- DVLOG_IF(1, FAILED(hr)) << "IAudioRenderClient::GetBuffer: "
- << std::hex << hr;
- if (SUCCEEDED(hr)) {
- // Using the AUDCLNT_BUFFERFLAGS_SILENT flag eliminates the need to
- // explicitly write silence data to the rendering buffer.
- hr = audio_render_client->ReleaseBuffer(endpoint_buffer_size,
- AUDCLNT_BUFFERFLAGS_SILENT);
- DVLOG_IF(1, FAILED(hr)) << "IAudioRenderClient::ReleaseBuffer: "
- << std::hex << hr;
- }
-
- // Sanity check: verify that the endpoint buffer is filled with silence.
- UINT32 num_queued_frames = 0;
- client->GetCurrentPadding(&num_queued_frames);
- DCHECK(num_queued_frames == endpoint_buffer_size);
-
- return audio_render_client;
-}
-
-ScopedComPtr<IAudioCaptureClient> CoreAudioUtil::CreateCaptureClient(
- IAudioClient* client) {
- DCHECK(CoreAudioUtil::IsSupported());
-
- // Get access to the IAudioCaptureClient interface. This interface
- // enables us to read input data from a capturing endpoint buffer.
- ScopedComPtr<IAudioCaptureClient> audio_capture_client;
- HRESULT hr = client->GetService(__uuidof(IAudioCaptureClient),
- audio_capture_client.ReceiveVoid());
- if (FAILED(hr)) {
- DVLOG(1) << "IAudioClient::GetService: " << std::hex << hr;
- return ScopedComPtr<IAudioCaptureClient>();
- }
- return audio_capture_client;
-}
-
-} // namespace media
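A small worked example of the time conversions used above (values chosen for illustration only): one reference-time unit is 100 ns, so a typical 10 ms shared-mode device period is reported as 100,000 units, and at an assumed 48 kHz mix format GetPreferredAudioParameters() derives a 480-frame buffer from it.

// Illustrative values only; a real period comes from GetDevicePeriod().
REFERENCE_TIME default_period = 100000;   // 100,000 * 100 ns == 10 ms.
double period_ms = media::CoreAudioUtil::RefererenceTimeToTimeDelta(
    default_period).InMillisecondsF();    // == 10.0
int frames_per_buffer = static_cast<int>(
    48000 *  // Assumed nSamplesPerSec of the shared-mode mix format.
    media::CoreAudioUtil::RefererenceTimeToTimeDelta(
        default_period).InSecondsF() + 0.5);  // == 480 frames.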
diff --git a/src/media/audio/win/core_audio_util_win.h b/src/media/audio/win/core_audio_util_win.h
deleted file mode 100644
index c8a37d6..0000000
--- a/src/media/audio/win/core_audio_util_win.h
+++ /dev/null
@@ -1,167 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// Utility methods for the Core Audio API on Windows.
-// Always ensure that Core Audio is supported before using these methods.
-// Use CoreAudioUtil::IsSupported() for this purpose.
-// Also, all methods must be called on a valid COM thread. This can be done
-// by using the base::win::ScopedCOMInitializer helper class.
-
-#ifndef MEDIA_AUDIO_WIN_CORE_AUDIO_UTIL_WIN_H_
-#define MEDIA_AUDIO_WIN_CORE_AUDIO_UTIL_WIN_H_
-
-#include <audioclient.h>
-#include <mmdeviceapi.h>
-#include <string>
-
-#include "base/basictypes.h"
-#include "base/time.h"
-#include "base/win/scoped_comptr.h"
-#include "media/audio/audio_device_name.h"
-#include "media/audio/audio_parameters.h"
-#include "media/base/media_export.h"
-
-using base::win::ScopedComPtr;
-
-namespace media {
-
-class MEDIA_EXPORT CoreAudioUtil {
- public:
- // Returns true if Windows Core Audio is supported.
- // Always verify that this method returns true before using any of the
- // methods in this class.
- static bool IsSupported();
-
-  // Converts reference time to base::TimeDelta.
- // One reference-time unit is 100 nanoseconds.
- // Example: double s = RefererenceTimeToTimeDelta(t).InMillisecondsF();
- static base::TimeDelta RefererenceTimeToTimeDelta(REFERENCE_TIME time);
-
- // Returns AUDCLNT_SHAREMODE_EXCLUSIVE if --enable-exclusive-mode is used
- // as command-line flag and AUDCLNT_SHAREMODE_SHARED otherwise (default).
- static AUDCLNT_SHAREMODE GetShareMode();
-
- // The Windows Multimedia Device (MMDevice) API enables audio clients to
- // discover audio endpoint devices and determine their capabilities.
-
-  // Number of active audio devices in the specified data-flow direction.
- // Set |data_flow| to eAll to retrieve the total number of active audio
- // devices.
- static int NumberOfActiveDevices(EDataFlow data_flow);
-
- // Creates an IMMDeviceEnumerator interface which provides methods for
- // enumerating audio endpoint devices.
- static ScopedComPtr<IMMDeviceEnumerator> CreateDeviceEnumerator();
-
- // Creates a default endpoint device that is specified by a data-flow
- // direction and role, e.g. default render device.
- static ScopedComPtr<IMMDevice> CreateDefaultDevice(
- EDataFlow data_flow, ERole role);
-
- // Creates an endpoint device that is specified by a unique endpoint device-
- // identification string.
- static ScopedComPtr<IMMDevice> CreateDevice(const std::string& device_id);
-
- // Returns the unique ID and user-friendly name of a given endpoint device.
- // Example: "{0.0.1.00000000}.{8db6020f-18e3-4f25-b6f5-7726c9122574}", and
- // "Microphone (Realtek High Definition Audio)".
- static HRESULT GetDeviceName(IMMDevice* device, AudioDeviceName* name);
-
- // Gets the user-friendly name of the endpoint device which is represented
- // by a unique id in |device_id|.
- static std::string GetFriendlyName(const std::string& device_id);
-
- // Returns true if the provided unique |device_id| corresponds to the current
-  // default device for the specified data-flow direction and role.
- static bool DeviceIsDefault(
- EDataFlow flow, ERole role, std::string device_id);
-
- // Query if the audio device is a rendering device or a capture device.
- static EDataFlow GetDataFlow(IMMDevice* device);
-
- // The Windows Audio Session API (WASAPI) enables client applications to
- // manage the flow of audio data between the application and an audio endpoint
- // device.
-
- // Create an IAudioClient interface for the default IMMDevice where
-  // flow direction and role are defined by |data_flow| and |role|.
- // The IAudioClient interface enables a client to create and initialize an
- // audio stream between an audio application and the audio engine (for a
- // shared-mode stream) or the hardware buffer of an audio endpoint device
- // (for an exclusive-mode stream).
- static ScopedComPtr<IAudioClient> CreateDefaultClient(EDataFlow data_flow,
- ERole role);
-
- // Create an IAudioClient interface for an existing IMMDevice given by
-  // |audio_device|. Flow direction and role are defined by the |audio_device|.
- static ScopedComPtr<IAudioClient> CreateClient(IMMDevice* audio_device);
-
- // Get the mix format that the audio engine uses internally for processing
- // of shared-mode streams. This format is not necessarily a format that the
- // audio endpoint device supports. Thus, the caller might not succeed in
- // creating an exclusive-mode stream with a format obtained by this method.
- static HRESULT GetSharedModeMixFormat(IAudioClient* client,
- WAVEFORMATPCMEX* format);
-
- // Returns true if the specified |client| supports the format in |format|
- // for the given |share_mode| (shared or exclusive).
- static bool IsFormatSupported(IAudioClient* client,
- AUDCLNT_SHAREMODE share_mode,
- const WAVEFORMATPCMEX* format);
-
- // For a shared-mode stream, the audio engine periodically processes the
- // data in the endpoint buffer at the period obtained in |device_period|.
- // For an exclusive mode stream, |device_period| corresponds to the minimum
- // time interval between successive processing by the endpoint device.
- // This period plus the stream latency between the buffer and endpoint device
- // represents the minimum possible latency that an audio application can
- // achieve. The time in |device_period| is expressed in 100-nanosecond units.
- static HRESULT GetDevicePeriod(IAudioClient* client,
- AUDCLNT_SHAREMODE share_mode,
- REFERENCE_TIME* device_period);
-
-  // Get the preferred audio parameters for the specified |client|, or for the
-  // direction and role given by |data_flow| and |role|.
-  // The acquired values should only be utilized for shared-mode streams since
-  // there are no preferred settings for an exclusive-mode stream.
- static HRESULT GetPreferredAudioParameters(IAudioClient* client,
- AudioParameters* params);
- static HRESULT GetPreferredAudioParameters(EDataFlow data_flow, ERole role,
- AudioParameters* params);
-
- // After activating an IAudioClient interface on an audio endpoint device,
- // the client must initialize it once, and only once, to initialize the audio
- // stream between the client and the device. In shared mode, the client
- // connects indirectly through the audio engine which does the mixing.
- // In exclusive mode, the client connects directly to the audio hardware.
- // If a valid event is provided in |event_handle|, the client will be
- // initialized for event-driven buffer handling. If |event_handle| is set to
- // NULL, event-driven buffer handling is not utilized.
- static HRESULT SharedModeInitialize(IAudioClient* client,
- const WAVEFORMATPCMEX* format,
- HANDLE event_handle,
- size_t* endpoint_buffer_size);
- // TODO(henrika): add ExclusiveModeInitialize(...)
-
- // Create an IAudioRenderClient client for an existing IAudioClient given by
- // |client|. The IAudioRenderClient interface enables a client to write
- // output data to a rendering endpoint buffer.
- static ScopedComPtr<IAudioRenderClient> CreateRenderClient(
- IAudioClient* client);
-
- // Create an IAudioCaptureClient client for an existing IAudioClient given by
- // |client|. The IAudioCaptureClient interface enables a client to read
- // input data from a capture endpoint buffer.
- static ScopedComPtr<IAudioCaptureClient> CreateCaptureClient(
- IAudioClient* client);
-
- private:
- CoreAudioUtil() {}
- ~CoreAudioUtil() {}
- DISALLOW_COPY_AND_ASSIGN(CoreAudioUtil);
-};
-
-} // namespace media
-
-#endif // MEDIA_AUDIO_WIN_CORE_AUDIO_UTIL_WIN_H_
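Taken together, the methods above are intended to be chained when bringing up a stream. A minimal sketch of an event-driven, shared-mode render setup (hypothetical helper name, error handling reduced to early returns):

// Sketch of the typical CoreAudioUtil call sequence for a render stream.
void SetUpSharedRenderStream() {
  if (!media::CoreAudioUtil::IsSupported())
    return;
  base::win::ScopedComPtr<IAudioClient> client =
      media::CoreAudioUtil::CreateDefaultClient(eRender, eConsole);
  if (!client)
    return;
  WAVEFORMATPCMEX format;
  if (FAILED(media::CoreAudioUtil::GetSharedModeMixFormat(client, &format)))
    return;
  // The engine signals this event whenever a buffer is ready for processing.
  base::win::ScopedHandle audio_ready(::CreateEvent(NULL, FALSE, FALSE, NULL));
  size_t endpoint_buffer_frames = 0;
  if (FAILED(media::CoreAudioUtil::SharedModeInitialize(
          client, &format, audio_ready.Get(), &endpoint_buffer_frames)))
    return;
  // Pre-fills the endpoint buffer with silence and returns the render client.
  base::win::ScopedComPtr<IAudioRenderClient> render_client =
      media::CoreAudioUtil::CreateRenderClient(client);
}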
diff --git a/src/media/audio/win/core_audio_util_win_unittest.cc b/src/media/audio/win/core_audio_util_win_unittest.cc
deleted file mode 100644
index b1edf47..0000000
--- a/src/media/audio/win/core_audio_util_win_unittest.cc
+++ /dev/null
@@ -1,389 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/memory/scoped_ptr.h"
-#include "base/synchronization/waitable_event.h"
-#include "base/win/scoped_com_initializer.h"
-#include "base/win/scoped_handle.h"
-#include "media/audio/win/core_audio_util_win.h"
-#include "testing/gmock/include/gmock/gmock.h"
-#include "testing/gtest/include/gtest/gtest.h"
-
-using base::win::ScopedCOMInitializer;
-
-namespace media {
-
-class CoreAudioUtilWinTest : public ::testing::Test {
- protected:
- // The test runs on a COM thread in the multithreaded apartment (MTA).
- // If we don't initialize the COM library on a thread before using COM,
- // all function calls will return CO_E_NOTINITIALIZED.
- CoreAudioUtilWinTest()
- : com_init_(ScopedCOMInitializer::kMTA) {
- DCHECK(com_init_.succeeded());
- }
- virtual ~CoreAudioUtilWinTest() {}
-
- bool CanRunAudioTest() {
- bool core_audio = CoreAudioUtil::IsSupported();
- if (!core_audio)
- return false;
- int capture_devices = CoreAudioUtil::NumberOfActiveDevices(eCapture);
- int render_devices = CoreAudioUtil::NumberOfActiveDevices(eRender);
- return ((capture_devices > 0) && (render_devices > 0));
- }
-
- ScopedCOMInitializer com_init_;
-};
-
-TEST_F(CoreAudioUtilWinTest, NumberOfActiveDevices) {
- if (!CanRunAudioTest())
- return;
-
- int render_devices = CoreAudioUtil::NumberOfActiveDevices(eRender);
- EXPECT_GT(render_devices, 0);
- int capture_devices = CoreAudioUtil::NumberOfActiveDevices(eCapture);
- EXPECT_GT(capture_devices, 0);
- int total_devices = CoreAudioUtil::NumberOfActiveDevices(eAll);
- EXPECT_EQ(total_devices, render_devices + capture_devices);
-}
-
-TEST_F(CoreAudioUtilWinTest, CreateDeviceEnumerator) {
- if (!CanRunAudioTest())
- return;
-
- ScopedComPtr<IMMDeviceEnumerator> enumerator =
- CoreAudioUtil::CreateDeviceEnumerator();
- EXPECT_TRUE(enumerator);
-}
-
-TEST_F(CoreAudioUtilWinTest, CreateDefaultDevice) {
- if (!CanRunAudioTest())
- return;
-
- struct {
- EDataFlow flow;
- ERole role;
- } data[] = {
- {eRender, eConsole},
- {eRender, eCommunications},
- {eRender, eMultimedia},
- {eCapture, eConsole},
- {eCapture, eCommunications},
- {eCapture, eMultimedia}
- };
-
- // Create default devices for all flow/role combinations above.
- ScopedComPtr<IMMDevice> audio_device;
- for (int i = 0; i < arraysize(data); ++i) {
- audio_device =
- CoreAudioUtil::CreateDefaultDevice(data[i].flow, data[i].role);
- EXPECT_TRUE(audio_device);
- EXPECT_EQ(data[i].flow, CoreAudioUtil::GetDataFlow(audio_device));
- }
-
- // Only eRender and eCapture are allowed as flow parameter.
- audio_device = CoreAudioUtil::CreateDefaultDevice(eAll, eConsole);
- EXPECT_FALSE(audio_device);
-}
-
-TEST_F(CoreAudioUtilWinTest, CreateDevice) {
- if (!CanRunAudioTest())
- return;
-
- // Get name and ID of default device used for playback.
- ScopedComPtr<IMMDevice> default_render_device =
- CoreAudioUtil::CreateDefaultDevice(eRender, eConsole);
- AudioDeviceName default_render_name;
- EXPECT_TRUE(SUCCEEDED(CoreAudioUtil::GetDeviceName(default_render_device,
- &default_render_name)));
-
-  // Use the unique ID as input to CreateDevice() and create a corresponding
- // IMMDevice.
- ScopedComPtr<IMMDevice> audio_device =
- CoreAudioUtil::CreateDevice(default_render_name.unique_id);
- EXPECT_TRUE(audio_device);
-
-  // Verify that the two IMMDevice interfaces represent the same endpoint
- // by comparing their unique IDs.
- AudioDeviceName device_name;
- EXPECT_TRUE(SUCCEEDED(CoreAudioUtil::GetDeviceName(audio_device,
- &device_name)));
- EXPECT_EQ(default_render_name.unique_id, device_name.unique_id);
-}
-
-TEST_F(CoreAudioUtilWinTest, GetDefaultDeviceName) {
- if (!CanRunAudioTest())
- return;
-
- struct {
- EDataFlow flow;
- ERole role;
- } data[] = {
- {eRender, eConsole},
- {eRender, eCommunications},
- {eCapture, eConsole},
- {eCapture, eCommunications}
- };
-
- // Get name and ID of default devices for all flow/role combinations above.
- ScopedComPtr<IMMDevice> audio_device;
- AudioDeviceName device_name;
- for (int i = 0; i < arraysize(data); ++i) {
- audio_device =
- CoreAudioUtil::CreateDefaultDevice(data[i].flow, data[i].role);
- EXPECT_TRUE(SUCCEEDED(CoreAudioUtil::GetDeviceName(audio_device,
- &device_name)));
- EXPECT_FALSE(device_name.device_name.empty());
- EXPECT_FALSE(device_name.unique_id.empty());
- }
-}
-
-TEST_F(CoreAudioUtilWinTest, GetFriendlyName) {
- if (!CanRunAudioTest())
- return;
-
- // Get name and ID of default device used for recording.
- ScopedComPtr<IMMDevice> audio_device =
- CoreAudioUtil::CreateDefaultDevice(eCapture, eConsole);
- AudioDeviceName device_name;
- HRESULT hr = CoreAudioUtil::GetDeviceName(audio_device, &device_name);
- EXPECT_TRUE(SUCCEEDED(hr));
-
- // Use unique ID as input to GetFriendlyName() and compare the result
- // with the already obtained friendly name for the default capture device.
- std::string friendly_name = CoreAudioUtil::GetFriendlyName(
- device_name.unique_id);
- EXPECT_EQ(friendly_name, device_name.device_name);
-
- // Same test as above but for playback.
- audio_device = CoreAudioUtil::CreateDefaultDevice(eRender, eConsole);
- hr = CoreAudioUtil::GetDeviceName(audio_device, &device_name);
- EXPECT_TRUE(SUCCEEDED(hr));
- friendly_name = CoreAudioUtil::GetFriendlyName(device_name.unique_id);
- EXPECT_EQ(friendly_name, device_name.device_name);
-}
-
-TEST_F(CoreAudioUtilWinTest, DeviceIsDefault) {
- if (!CanRunAudioTest())
- return;
-
- // Verify that the default render device is correctly identified as a
- // default device.
- ScopedComPtr<IMMDevice> audio_device =
- CoreAudioUtil::CreateDefaultDevice(eRender, eConsole);
- AudioDeviceName name;
- EXPECT_TRUE(SUCCEEDED(CoreAudioUtil::GetDeviceName(audio_device, &name)));
- const std::string id = name.unique_id;
- EXPECT_TRUE(CoreAudioUtil::DeviceIsDefault(eRender, eConsole, id));
- EXPECT_FALSE(CoreAudioUtil::DeviceIsDefault(eCapture, eConsole, id));
-}
-
-TEST_F(CoreAudioUtilWinTest, CreateDefaultClient) {
- if (!CanRunAudioTest())
- return;
-
- EDataFlow data[] = {eRender, eCapture};
-
- for (int i = 0; i < arraysize(data); ++i) {
- ScopedComPtr<IAudioClient> client;
- client = CoreAudioUtil::CreateDefaultClient(data[i], eConsole);
- EXPECT_TRUE(client);
- }
-}
-
-TEST_F(CoreAudioUtilWinTest, CreateClient) {
- if (!CanRunAudioTest())
- return;
-
- EDataFlow data[] = {eRender, eCapture};
-
- for (int i = 0; i < arraysize(data); ++i) {
- ScopedComPtr<IMMDevice> device;
- ScopedComPtr<IAudioClient> client;
- device = CoreAudioUtil::CreateDefaultDevice(data[i], eConsole);
- EXPECT_TRUE(device);
- EXPECT_EQ(data[i], CoreAudioUtil::GetDataFlow(device));
- client = CoreAudioUtil::CreateClient(device);
- EXPECT_TRUE(client);
- }
-}
-
-TEST_F(CoreAudioUtilWinTest, GetSharedModeMixFormat) {
- if (!CanRunAudioTest())
- return;
-
- ScopedComPtr<IMMDevice> device;
- ScopedComPtr<IAudioClient> client;
- device = CoreAudioUtil::CreateDefaultDevice(eRender, eConsole);
- EXPECT_TRUE(device);
- client = CoreAudioUtil::CreateClient(device);
- EXPECT_TRUE(client);
-
-  // Perform a simple sanity test of the acquired format structure.
- WAVEFORMATPCMEX format;
- EXPECT_TRUE(SUCCEEDED(CoreAudioUtil::GetSharedModeMixFormat(client,
- &format)));
- EXPECT_GE(format.Format.nChannels, 1);
- EXPECT_GE(format.Format.nSamplesPerSec, 8000u);
- EXPECT_GE(format.Format.wBitsPerSample, 16);
- EXPECT_GE(format.Samples.wValidBitsPerSample, 16);
- EXPECT_EQ(format.Format.wFormatTag, WAVE_FORMAT_EXTENSIBLE);
-}
-
-TEST_F(CoreAudioUtilWinTest, GetDevicePeriod) {
- if (!CanRunAudioTest())
- return;
-
- EDataFlow data[] = {eRender, eCapture};
-
- // Verify that the device periods are valid for the default render and
- // capture devices.
- for (int i = 0; i < arraysize(data); ++i) {
- ScopedComPtr<IAudioClient> client;
- REFERENCE_TIME shared_time_period = 0;
- REFERENCE_TIME exclusive_time_period = 0;
- client = CoreAudioUtil::CreateDefaultClient(data[i], eConsole);
- EXPECT_TRUE(client);
- EXPECT_TRUE(SUCCEEDED(CoreAudioUtil::GetDevicePeriod(
- client, AUDCLNT_SHAREMODE_SHARED, &shared_time_period)));
- EXPECT_GT(shared_time_period, 0);
- EXPECT_TRUE(SUCCEEDED(CoreAudioUtil::GetDevicePeriod(
- client, AUDCLNT_SHAREMODE_EXCLUSIVE, &exclusive_time_period)));
- EXPECT_GT(exclusive_time_period, 0);
- EXPECT_LE(exclusive_time_period, shared_time_period);
- }
-}
-
-TEST_F(CoreAudioUtilWinTest, GetPreferredAudioParameters) {
- if (!CanRunAudioTest())
- return;
-
- EDataFlow data[] = {eRender, eCapture};
-
- // Verify that the preferred audio parameters are OK for the default render
- // and capture devices.
- for (int i = 0; i < arraysize(data); ++i) {
- ScopedComPtr<IAudioClient> client;
- AudioParameters params;
- client = CoreAudioUtil::CreateDefaultClient(data[i], eConsole);
- EXPECT_TRUE(client);
- EXPECT_TRUE(SUCCEEDED(CoreAudioUtil::GetPreferredAudioParameters(client,
- ¶ms)));
- EXPECT_TRUE(params.IsValid());
- }
-}
-
-TEST_F(CoreAudioUtilWinTest, SharedModeInitialize) {
- if (!CanRunAudioTest())
- return;
-
- ScopedComPtr<IAudioClient> client;
- client = CoreAudioUtil::CreateDefaultClient(eRender, eConsole);
- EXPECT_TRUE(client);
-
- WAVEFORMATPCMEX format;
- EXPECT_TRUE(SUCCEEDED(CoreAudioUtil::GetSharedModeMixFormat(client,
- &format)));
-
- // Perform a shared-mode initialization without event-driven buffer handling.
- size_t endpoint_buffer_size = 0;
- HRESULT hr = CoreAudioUtil::SharedModeInitialize(client, &format, NULL,
- &endpoint_buffer_size);
- EXPECT_TRUE(SUCCEEDED(hr));
- EXPECT_GT(endpoint_buffer_size, 0u);
-
- // It is only possible to create a client once.
- hr = CoreAudioUtil::SharedModeInitialize(client, &format, NULL,
- &endpoint_buffer_size);
- EXPECT_FALSE(SUCCEEDED(hr));
- EXPECT_EQ(hr, AUDCLNT_E_ALREADY_INITIALIZED);
-
- // Verify that it is possible to reinitialize the client after releasing it.
- client = CoreAudioUtil::CreateDefaultClient(eRender, eConsole);
- EXPECT_TRUE(client);
- hr = CoreAudioUtil::SharedModeInitialize(client, &format, NULL,
- &endpoint_buffer_size);
- EXPECT_TRUE(SUCCEEDED(hr));
- EXPECT_GT(endpoint_buffer_size, 0u);
-
- // Use a non-supported format and verify that initialization fails.
- // A simple way to emulate an invalid format is to use the shared-mode
-  // mixing format and modify the preferred sample rate.
- client = CoreAudioUtil::CreateDefaultClient(eRender, eConsole);
- EXPECT_TRUE(client);
- format.Format.nSamplesPerSec = format.Format.nSamplesPerSec + 1;
- EXPECT_FALSE(CoreAudioUtil::IsFormatSupported(
- client, AUDCLNT_SHAREMODE_SHARED, &format));
- hr = CoreAudioUtil::SharedModeInitialize(client, &format, NULL,
- &endpoint_buffer_size);
- EXPECT_TRUE(FAILED(hr));
- EXPECT_EQ(hr, E_INVALIDARG);
-
- // Finally, perform a shared-mode initialization using event-driven buffer
- // handling. The event handle will be signaled when an audio buffer is ready
- // to be processed by the client (not verified here).
- // The event handle should be in the nonsignaled state.
- base::win::ScopedHandle event_handle(::CreateEvent(NULL, TRUE, FALSE, NULL));
- client = CoreAudioUtil::CreateDefaultClient(eRender, eConsole);
- EXPECT_TRUE(client);
- EXPECT_TRUE(SUCCEEDED(CoreAudioUtil::GetSharedModeMixFormat(client,
- &format)));
- EXPECT_TRUE(CoreAudioUtil::IsFormatSupported(
- client, AUDCLNT_SHAREMODE_SHARED, &format));
- hr = CoreAudioUtil::SharedModeInitialize(client, &format, event_handle.Get(),
- &endpoint_buffer_size);
- EXPECT_TRUE(SUCCEEDED(hr));
- EXPECT_GT(endpoint_buffer_size, 0u);
-}
-
-TEST_F(CoreAudioUtilWinTest, CreateRenderAndCaptureClients) {
- if (!CanRunAudioTest())
- return;
-
- EDataFlow data[] = {eRender, eCapture};
-
- WAVEFORMATPCMEX format;
- size_t endpoint_buffer_size = 0;
-
- for (int i = 0; i < arraysize(data); ++i) {
- ScopedComPtr<IAudioClient> client;
- ScopedComPtr<IAudioRenderClient> render_client;
- ScopedComPtr<IAudioCaptureClient> capture_client;
-
- client = CoreAudioUtil::CreateDefaultClient(data[i], eConsole);
- EXPECT_TRUE(client);
- EXPECT_TRUE(SUCCEEDED(CoreAudioUtil::GetSharedModeMixFormat(client,
- &format)));
- if (data[i] == eRender) {
-      // It is not possible to create a render client using an uninitialized
- // client interface.
- render_client = CoreAudioUtil::CreateRenderClient(client);
- EXPECT_FALSE(render_client);
-
- // Do a proper initialization and verify that it works this time.
- CoreAudioUtil::SharedModeInitialize(client, &format, NULL,
- &endpoint_buffer_size);
- render_client = CoreAudioUtil::CreateRenderClient(client);
- EXPECT_TRUE(render_client);
- EXPECT_GT(endpoint_buffer_size, 0u);
- } else if (data[i] == eCapture) {
-      // It is not possible to create a capture client using an uninitialized
- // client interface.
- capture_client = CoreAudioUtil::CreateCaptureClient(client);
- EXPECT_FALSE(capture_client);
-
- // Do a proper initialization and verify that it works this time.
- CoreAudioUtil::SharedModeInitialize(client, &format, NULL,
- &endpoint_buffer_size);
- capture_client = CoreAudioUtil::CreateCaptureClient(client);
- EXPECT_TRUE(capture_client);
- EXPECT_GT(endpoint_buffer_size, 0u);
- }
- }
-}
-
-//
-
-} // namespace media
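The deleted unit tests above double as the only usage documentation for the CoreAudioUtil helpers. A minimal shared-mode render setup along the same lines might look as follows; the include path for core_audio_util_win.h is assumed, and error handling is reduced to early returns.

// Sketch only: mirrors the call sequence exercised by the tests above.
#include "base/win/scoped_comptr.h"
#include "media/audio/win/core_audio_util_win.h"  // Assumed header location.

namespace media {

bool SetUpSharedModeRender() {
  // Default render endpoint for the console role, as in the tests above.
  base::win::ScopedComPtr<IAudioClient> client;
  client = CoreAudioUtil::CreateDefaultClient(eRender, eConsole);
  if (!client)
    return false;

  // Initialize with the engine's own mix format so the format is supported.
  WAVEFORMATPCMEX format;
  if (FAILED(CoreAudioUtil::GetSharedModeMixFormat(client, &format)))
    return false;

  size_t endpoint_buffer_size = 0;
  if (FAILED(CoreAudioUtil::SharedModeInitialize(client, &format, NULL,
                                                 &endpoint_buffer_size)))
    return false;

  // A render client can only be created from an initialized IAudioClient.
  base::win::ScopedComPtr<IAudioRenderClient> render_client =
      CoreAudioUtil::CreateRenderClient(client);
  return render_client.get() != NULL && endpoint_buffer_size > 0;
}

}  // namespace media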
diff --git a/src/media/audio/win/device_enumeration_win.cc b/src/media/audio/win/device_enumeration_win.cc
deleted file mode 100644
index 46aacb0..0000000
--- a/src/media/audio/win/device_enumeration_win.cc
+++ /dev/null
@@ -1,180 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include <MMDeviceAPI.h>
-#include <mmsystem.h>
-#include <Functiondiscoverykeys_devpkey.h> // MMDeviceAPI.h must come first
-
-#include "media/audio/win/audio_manager_win.h"
-
-#include "base/logging.h"
-#include "base/utf_string_conversions.h"
-#include "base/win/scoped_co_mem.h"
-#include "base/win/scoped_comptr.h"
-
-using media::AudioDeviceNames;
-using base::win::ScopedComPtr;
-using base::win::ScopedCoMem;
-
-// Taken from Mmddk.h.
-#define DRV_RESERVED 0x0800
-#define DRV_QUERYFUNCTIONINSTANCEID (DRV_RESERVED + 17)
-#define DRV_QUERYFUNCTIONINSTANCEIDSIZE (DRV_RESERVED + 18)
-
-namespace media {
-
-bool GetInputDeviceNamesWin(AudioDeviceNames* device_names) {
- // It is assumed that this method is called from a COM thread, i.e.,
- // CoInitializeEx() is not called here again to avoid STA/MTA conflicts.
- ScopedComPtr<IMMDeviceEnumerator> enumerator;
- HRESULT hr = CoCreateInstance(__uuidof(MMDeviceEnumerator),
- NULL,
- CLSCTX_INPROC_SERVER,
- __uuidof(IMMDeviceEnumerator),
- enumerator.ReceiveVoid());
- DCHECK_NE(CO_E_NOTINITIALIZED, hr);
- if (FAILED(hr)) {
- LOG(WARNING) << "Failed to create IMMDeviceEnumerator: " << std::hex << hr;
- return false;
- }
-
- // Generate a collection of active audio capture endpoint devices.
- // This method will succeed even if all devices are disabled.
- ScopedComPtr<IMMDeviceCollection> collection;
- hr = enumerator->EnumAudioEndpoints(eCapture,
- DEVICE_STATE_ACTIVE,
- collection.Receive());
- if (FAILED(hr))
- return false;
-
- // Retrieve the number of active capture devices.
- UINT number_of_active_devices = 0;
- collection->GetCount(&number_of_active_devices);
- if (number_of_active_devices == 0)
- return true;
-
- media::AudioDeviceName device;
-
- // Loop over all active capture devices and add friendly name and
- // unique ID to the |device_names| list.
- for (UINT i = 0; i < number_of_active_devices; ++i) {
- // Retrieve unique name of endpoint device.
- // Example: "{0.0.1.00000000}.{8db6020f-18e3-4f25-b6f5-7726c9122574}".
- ScopedComPtr<IMMDevice> audio_device;
- hr = collection->Item(i, audio_device.Receive());
- if (FAILED(hr))
- continue;
-
- // Store the unique name.
- ScopedCoMem<WCHAR> endpoint_device_id;
- audio_device->GetId(&endpoint_device_id);
- device.unique_id = WideToUTF8(static_cast<WCHAR*>(endpoint_device_id));
-
- // Retrieve user-friendly name of endpoint device.
- // Example: "Microphone (Realtek High Definition Audio)".
- ScopedComPtr<IPropertyStore> properties;
- hr = audio_device->OpenPropertyStore(STGM_READ, properties.Receive());
- if (SUCCEEDED(hr)) {
- PROPVARIANT friendly_name;
- PropVariantInit(&friendly_name);
- hr = properties->GetValue(PKEY_Device_FriendlyName, &friendly_name);
-
- // Store the user-friendly name.
- if (SUCCEEDED(hr) &&
- friendly_name.vt == VT_LPWSTR && friendly_name.pwszVal) {
- device.device_name = WideToUTF8(friendly_name.pwszVal);
- }
- PropVariantClear(&friendly_name);
- }
-
- // Add combination of user-friendly and unique name to the output list.
- device_names->push_back(device);
- }
-
- return true;
-}
-
-bool GetInputDeviceNamesWinXP(AudioDeviceNames* device_names) {
- // Retrieve the number of active waveform input devices.
- UINT number_of_active_devices = waveInGetNumDevs();
- if (number_of_active_devices == 0)
- return true;
-
- media::AudioDeviceName device;
- WAVEINCAPS capabilities;
- MMRESULT err = MMSYSERR_NOERROR;
-
- // Loop over all active capture devices and add friendly name and
- // unique ID to the |device_names| list. Note that, for Wave on XP,
- // the "unique" name will simply be a copy of the friendly name since
- // there is no safe method to retrieve a unique device name on XP.
- for (UINT i = 0; i < number_of_active_devices; ++i) {
- // Retrieve the capabilities of the specified waveform-audio input device.
- err = waveInGetDevCaps(i, &capabilities, sizeof(capabilities));
- if (err != MMSYSERR_NOERROR)
- continue;
-
- // Store the user-friendly name. Max length is MAXPNAMELEN(=32)
-    // characters and the name can be truncated on XP.
- // Example: "Microphone (Realtek High Defini".
- device.device_name = WideToUTF8(capabilities.szPname);
-
- // Store the "unique" name (we use same as friendly name on Windows XP).
- device.unique_id = WideToUTF8(capabilities.szPname);
-
- // Add combination of user-friendly and unique name to the output list.
- device_names->push_back(device);
- }
-
- return true;
-}
-
-std::string ConvertToWinXPDeviceId(const std::string& device_id) {
- UINT number_of_active_devices = waveInGetNumDevs();
- MMRESULT result = MMSYSERR_NOERROR;
-
- UINT i = 0;
- for (; i < number_of_active_devices; ++i) {
- size_t size = 0;
- // Get the size (including the terminating NULL) of the endpoint ID of the
- // waveIn device.
- result = waveInMessage(reinterpret_cast<HWAVEIN>(i),
- DRV_QUERYFUNCTIONINSTANCEIDSIZE,
- reinterpret_cast<DWORD_PTR>(&size), NULL);
- if (result != MMSYSERR_NOERROR)
- continue;
-
- ScopedCoMem<WCHAR> id;
- id.Reset(static_cast<WCHAR*>(CoTaskMemAlloc(size)));
- if (!id)
- continue;
-
- // Get the endpoint ID string for this waveIn device.
- result = waveInMessage(
- reinterpret_cast<HWAVEIN>(i), DRV_QUERYFUNCTIONINSTANCEID,
- reinterpret_cast<DWORD_PTR>(static_cast<WCHAR*>(id)), size);
- if (result != MMSYSERR_NOERROR)
- continue;
-
- std::string utf8_id = WideToUTF8(static_cast<WCHAR*>(id));
- // Check whether the endpoint ID string of this waveIn device matches that
- // of the audio endpoint device.
- if (device_id == utf8_id)
- break;
- }
-
- // If a matching waveIn device was found, convert the unique endpoint ID
- // string to a standard friendly name with max 32 characters.
- if (i < number_of_active_devices) {
- WAVEINCAPS capabilities;
-
- result = waveInGetDevCaps(i, &capabilities, sizeof(capabilities));
- if (result == MMSYSERR_NOERROR)
- return WideToUTF8(capabilities.szPname);
- }
-
- return std::string();
-}
-
-} // namespace media
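For completeness, a short sketch of how the two enumeration paths removed here fit together; it assumes it runs on a COM-initialized (MTA) thread, as GetInputDeviceNamesWin() itself expects, and the dumping function is purely illustrative.

#include <iostream>
#include <string>

#include "media/audio/audio_device_name.h"
#include "media/audio/win/device_enumeration_win.h"

// Illustrative only: list the active capture endpoints and the WaveIn-style
// name that each MMDevice endpoint ID maps to on the XP code path.
void DumpCaptureDevices() {
  media::AudioDeviceNames device_names;
  if (!media::GetInputDeviceNamesWin(&device_names))
    return;
  for (media::AudioDeviceNames::const_iterator it = device_names.begin();
       it != device_names.end(); ++it) {
    std::string wave_in_name = media::ConvertToWinXPDeviceId(it->unique_id);
    std::cout << it->device_name << " [" << it->unique_id << "] -> "
              << wave_in_name << std::endl;
  }
}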
diff --git a/src/media/audio/win/device_enumeration_win.h b/src/media/audio/win/device_enumeration_win.h
deleted file mode 100644
index 3d44670..0000000
--- a/src/media/audio/win/device_enumeration_win.h
+++ /dev/null
@@ -1,40 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef MEDIA_AUDIO_WIN_DEVICE_ENUMERATION_WIN_H_
-#define MEDIA_AUDIO_WIN_DEVICE_ENUMERATION_WIN_H_
-
-#include <string>
-
-#include "media/audio/audio_device_name.h"
-
-namespace media {
-
-// Returns a list of audio input device structures (name and unique device ID)
-// using the MMDevice API which is supported on Windows Vista and higher.
-// Example record in the output list:
-// - device_name: "Microphone (Realtek High Definition Audio)".
-// - unique_id: "{0.0.1.00000000}.{8db6020f-18e3-4f25-b6f5-7726c9122574}"
-// This method must be called from a COM thread using MTA.
-bool GetInputDeviceNamesWin(media::AudioDeviceNames* device_names);
-
-// Returns a list of audio input device structures (name and unique device ID)
-// using the WaveIn API which is supported on Windows XP and higher.
-// Example record in the output list:
-// - device_name: "Microphone (Realtek High Defini".
-// - unique_id: "Microphone (Realtek High Defini" (same as friendly name).
-bool GetInputDeviceNamesWinXP(media::AudioDeviceNames* device_names);
-
-// Converts a device ID generated by |GetInputDeviceNamesWin()| to the
-// corresponding ID by |GetInputDeviceNamesWinXP()|. Returns an empty string on
-// failure.
-// Example input and output:
-// - input ID: "{0.0.1.00000000}.{8db6020f-18e3-4f25-b6f5-7726c9122574}"
-// - output ID: "Microphone (Realtek High Defini"
-std::string ConvertToWinXPDeviceId(const std::string& device_id);
-
-} // namespace media
-
-#endif // MEDIA_AUDIO_WIN_DEVICE_ENUMERATION_WIN_H_
-
diff --git a/src/media/audio/win/wavein_input_win.cc b/src/media/audio/win/wavein_input_win.cc
deleted file mode 100644
index a625707..0000000
--- a/src/media/audio/win/wavein_input_win.cc
+++ /dev/null
@@ -1,302 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "media/audio/win/wavein_input_win.h"
-
-#pragma comment(lib, "winmm.lib")
-
-#include "base/logging.h"
-#include "media/audio/audio_io.h"
-#include "media/audio/audio_util.h"
-#include "media/audio/win/audio_manager_win.h"
-#include "media/audio/win/device_enumeration_win.h"
-
-namespace {
-const int kStopInputStreamCallbackTimeout = 3000; // Three seconds.
-}
-
-namespace media {
-
-// Our sound buffers are allocated once and kept in a linked list using the
-// WAVEHDR::dwUser variable. The last buffer points to the first buffer.
-static WAVEHDR* GetNextBuffer(WAVEHDR* current) {
- return reinterpret_cast<WAVEHDR*>(current->dwUser);
-}
-
-PCMWaveInAudioInputStream::PCMWaveInAudioInputStream(
- AudioManagerWin* manager, const AudioParameters& params, int num_buffers,
- const std::string& device_id)
- : state_(kStateEmpty),
- manager_(manager),
- device_id_(device_id),
- wavein_(NULL),
- callback_(NULL),
- num_buffers_(num_buffers),
- buffer_(NULL),
- channels_(params.channels()) {
- DCHECK_GT(num_buffers_, 0);
- format_.wFormatTag = WAVE_FORMAT_PCM;
- format_.nChannels = params.channels() > 2 ? 2 : params.channels();
- format_.nSamplesPerSec = params.sample_rate();
- format_.wBitsPerSample = params.bits_per_sample();
- format_.cbSize = 0;
- format_.nBlockAlign = (format_.nChannels * format_.wBitsPerSample) / 8;
- format_.nAvgBytesPerSec = format_.nBlockAlign * format_.nSamplesPerSec;
- buffer_size_ = params.frames_per_buffer() * format_.nBlockAlign;
- // If we don't have a packet size we use 100ms.
- if (!buffer_size_)
- buffer_size_ = format_.nAvgBytesPerSec / 10;
- // The event is auto-reset.
- stopped_event_.Set(::CreateEventW(NULL, FALSE, FALSE, NULL));
-}
-
-PCMWaveInAudioInputStream::~PCMWaveInAudioInputStream() {
- DCHECK(NULL == wavein_);
-}
-
-bool PCMWaveInAudioInputStream::Open() {
- if (state_ != kStateEmpty)
- return false;
- if (num_buffers_ < 2 || num_buffers_ > 10)
- return false;
-
- // Convert the stored device id string into an unsigned integer
- // corresponding to the selected device.
- UINT device_id = WAVE_MAPPER;
- if (!GetDeviceId(&device_id)) {
- return false;
- }
-
- // Open the specified input device for recording.
- MMRESULT result = MMSYSERR_NOERROR;
- result = ::waveInOpen(&wavein_, device_id, &format_,
- reinterpret_cast<DWORD_PTR>(WaveCallback),
- reinterpret_cast<DWORD_PTR>(this),
- CALLBACK_FUNCTION);
- if (result != MMSYSERR_NOERROR)
- return false;
-
- SetupBuffers();
- state_ = kStateReady;
- return true;
-}
-
-void PCMWaveInAudioInputStream::SetupBuffers() {
- WAVEHDR* last = NULL;
- WAVEHDR* first = NULL;
- for (int ix = 0; ix != num_buffers_; ++ix) {
- uint32 sz = sizeof(WAVEHDR) + buffer_size_;
- buffer_ = reinterpret_cast<WAVEHDR*>(new char[sz]);
- buffer_->lpData = reinterpret_cast<char*>(buffer_) + sizeof(WAVEHDR);
- buffer_->dwBufferLength = buffer_size_;
- buffer_->dwBytesRecorded = 0;
- buffer_->dwUser = reinterpret_cast<DWORD_PTR>(last);
- buffer_->dwFlags = WHDR_DONE;
- buffer_->dwLoops = 0;
- if (ix == 0)
- first = buffer_;
- last = buffer_;
- ::waveInPrepareHeader(wavein_, buffer_, sizeof(WAVEHDR));
- }
- // Fix the first buffer to point to the last one.
- first->dwUser = reinterpret_cast<DWORD_PTR>(last);
-}
-
-void PCMWaveInAudioInputStream::FreeBuffers() {
- WAVEHDR* current = buffer_;
- for (int ix = 0; ix != num_buffers_; ++ix) {
- WAVEHDR* next = GetNextBuffer(current);
- if (current->dwFlags & WHDR_PREPARED)
- ::waveInUnprepareHeader(wavein_, current, sizeof(WAVEHDR));
- delete[] reinterpret_cast<char*>(current);
- current = next;
- }
- buffer_ = NULL;
-}
-
-void PCMWaveInAudioInputStream::Start(AudioInputCallback* callback) {
- if (state_ != kStateReady)
- return;
-
- callback_ = callback;
- state_ = kStateRecording;
-
- WAVEHDR* buffer = buffer_;
- for (int ix = 0; ix != num_buffers_; ++ix) {
- QueueNextPacket(buffer);
- buffer = GetNextBuffer(buffer);
- }
- buffer = buffer_;
-
- MMRESULT result = ::waveInStart(wavein_);
- if (result != MMSYSERR_NOERROR) {
- HandleError(result);
- state_ = kStateReady;
- } else {
- manager_->IncreaseActiveInputStreamCount();
- }
-}
-
-// Stopping is tricky. First, no buffer should be locked by the audio driver,
-// or else waveInReset() will deadlock. Second, the callback should not be
-// inside the AudioInputCallback's OnData because waveInReset() forcefully
-// kills the callback thread.
-void PCMWaveInAudioInputStream::Stop() {
- if (state_ != kStateRecording)
- return;
- state_ = kStateStopping;
- // Wait for the callback to finish, it will signal us when ready to be reset.
- if (WAIT_OBJECT_0 !=
- ::WaitForSingleObject(stopped_event_, kStopInputStreamCallbackTimeout)) {
- HandleError(::GetLastError());
- return;
- }
- // Stop is always called before Close. In case of error, this will be
- // also called when closing the input controller.
- manager_->DecreaseActiveInputStreamCount();
-
- state_ = kStateStopped;
- MMRESULT res = ::waveInReset(wavein_);
- if (res != MMSYSERR_NOERROR) {
- state_ = kStateRecording;
- HandleError(res);
- return;
- }
- state_ = kStateReady;
-}
-
-// We can Close in any state, but closing a stream that is still recording
-// makes Windows generate an error, which we propagate to the source.
-void PCMWaveInAudioInputStream::Close() {
- if (wavein_) {
- // waveInClose generates a callback with WIM_CLOSE id in the same thread.
- MMRESULT res = ::waveInClose(wavein_);
- if (res != MMSYSERR_NOERROR) {
- HandleError(res);
- return;
- }
- state_ = kStateClosed;
- wavein_ = NULL;
- FreeBuffers();
- }
- // Tell the audio manager that we have been released. This can result in
- // the manager destroying us in-place so this needs to be the last thing
- // we do on this function.
- manager_->ReleaseInputStream(this);
-}
-
-double PCMWaveInAudioInputStream::GetMaxVolume() {
- // TODO(henrika): Add volume support using the Audio Mixer API.
- return 0.0;
-}
-
-void PCMWaveInAudioInputStream::SetVolume(double volume) {
- // TODO(henrika): Add volume support using the Audio Mixer API.
-}
-
-double PCMWaveInAudioInputStream::GetVolume() {
- // TODO(henrika): Add volume support using the Audio Mixer API.
- return 0.0;
-}
-
-void PCMWaveInAudioInputStream::SetAutomaticGainControl(bool enabled) {
- // TODO(henrika): Add AGC support when volume control has been added.
- NOTIMPLEMENTED();
-}
-
-bool PCMWaveInAudioInputStream::GetAutomaticGainControl() {
- // TODO(henrika): Add AGC support when volume control has been added.
- NOTIMPLEMENTED();
- return false;
-}
-
-void PCMWaveInAudioInputStream::HandleError(MMRESULT error) {
- DLOG(WARNING) << "PCMWaveInAudio error " << error;
- callback_->OnError(this, error);
-}
-
-void PCMWaveInAudioInputStream::QueueNextPacket(WAVEHDR *buffer) {
- MMRESULT res = ::waveInAddBuffer(wavein_, buffer, sizeof(WAVEHDR));
- if (res != MMSYSERR_NOERROR)
- HandleError(res);
-}
-
-bool PCMWaveInAudioInputStream::GetDeviceId(UINT* device_index) {
- // Deliver the default input device id (WAVE_MAPPER) if the default
- // device has been selected.
- if (device_id_ == AudioManagerBase::kDefaultDeviceId) {
- *device_index = WAVE_MAPPER;
- return true;
- }
-
- // Get list of all available and active devices.
- AudioDeviceNames device_names;
- if (!media::GetInputDeviceNamesWinXP(&device_names))
- return false;
-
- if (device_names.empty())
- return false;
-
-  // Search the full list of devices and compare each entry with the device
-  // id that was specified in the constructor. Stop comparing when a match is
-  // found and return the corresponding index.
- UINT index = 0;
- bool found_device = false;
- AudioDeviceNames::const_iterator it = device_names.begin();
- while (it != device_names.end()) {
- if (it->unique_id.compare(device_id_) == 0) {
- *device_index = index;
- found_device = true;
- break;
- }
- ++index;
- ++it;
- }
-
- return found_device;
-}
-
-// Windows calls us back in this function when some events happen. Most notably
-// when it has an audio buffer with recorded data.
-void PCMWaveInAudioInputStream::WaveCallback(HWAVEIN hwi, UINT msg,
- DWORD_PTR instance,
- DWORD_PTR param1, DWORD_PTR) {
- PCMWaveInAudioInputStream* obj =
- reinterpret_cast<PCMWaveInAudioInputStream*>(instance);
-
- if (msg == WIM_DATA) {
-    // WIM_DATA indicates that the driver is done with one of our buffers. We
-    // pass it to the callback and check if we need to stop recording.
-    // It should be OK to assume the data in the buffer is what has been
-    // recorded by the soundcard.
- // TODO(henrika): the |volume| parameter is always set to zero since there
- // is currently no support for controlling the microphone volume level.
- WAVEHDR* buffer = reinterpret_cast<WAVEHDR*>(param1);
- obj->callback_->OnData(obj, reinterpret_cast<const uint8*>(buffer->lpData),
- buffer->dwBytesRecorded,
- buffer->dwBytesRecorded,
- 0.0);
-
- if (obj->state_ == kStateStopping) {
-      // The main thread has called Stop() and is waiting to issue waveInReset
-      // which will kill this thread. We should not enter AudioInputCallback
-      // code anymore.
- ::SetEvent(obj->stopped_event_);
- } else if (obj->state_ == kStateStopped) {
-      // Not sure if we ever hit this, but just in case.
- } else {
- // Queue the finished buffer back with the audio driver. Since we are
- // reusing the same buffers we can get away without calling
- // waveInPrepareHeader.
- obj->QueueNextPacket(buffer);
- }
- } else if (msg == WIM_CLOSE) {
- // We can be closed before calling Start, so it is possible to have a
- // null callback at this point.
- if (obj->callback_)
- obj->callback_->OnClose(obj);
- }
-}
-
-} // namespace media
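The class removed above is normally created and destroyed by AudioManagerWin; the direct construction in the following sketch is therefore illustrative only, and the parameters are whatever the caller has negotiated.

#include "media/audio/audio_io.h"
#include "media/audio/audio_manager_base.h"
#include "media/audio/audio_parameters.h"
#include "media/audio/win/audio_manager_win.h"
#include "media/audio/win/wavein_input_win.h"

// Sketch of the Open/Start/Stop/Close lifecycle implemented above.
void RecordOnce(media::AudioManagerWin* manager,
                const media::AudioParameters& params,
                media::AudioInputCallback* callback) {
  // Three buffers is a reasonable choice; Open() rejects counts outside [2, 10].
  media::PCMWaveInAudioInputStream* stream =
      new media::PCMWaveInAudioInputStream(
          manager, params, 3, media::AudioManagerBase::kDefaultDeviceId);
  if (stream->Open()) {
    stream->Start(callback);  // WaveCallback() begins delivering OnData().
    // ... capture runs on the waveIn callback thread ...
    stream->Stop();           // Blocks until the callback signals stopped_event_.
  }
  stream->Close();  // Hands the stream back to |manager|, which deletes it.
}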
diff --git a/src/media/audio/win/wavein_input_win.h b/src/media/audio/win/wavein_input_win.h
deleted file mode 100644
index 82f1f55..0000000
--- a/src/media/audio/win/wavein_input_win.h
+++ /dev/null
@@ -1,124 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef MEDIA_AUDIO_WIN_WAVEIN_INPUT_WIN_H_
-#define MEDIA_AUDIO_WIN_WAVEIN_INPUT_WIN_H_
-
-#include <string>
-
-#include <windows.h>
-#include <mmsystem.h>
-
-#include "base/basictypes.h"
-#include "base/compiler_specific.h"
-#include "base/win/scoped_handle.h"
-#include "media/audio/audio_io.h"
-#include "media/audio/audio_parameters.h"
-
-namespace media {
-
-class AudioManagerWin;
-
-class PCMWaveInAudioInputStream : public AudioInputStream {
- public:
-  // The ctor takes all the usual parameters, plus |manager|, which is the
-  // audio manager that is creating this object, and |device_id|, which is
-  // provided by the operating system.
- PCMWaveInAudioInputStream(AudioManagerWin* manager,
- const AudioParameters& params,
- int num_buffers,
- const std::string& device_id);
- virtual ~PCMWaveInAudioInputStream();
-
- // Implementation of AudioInputStream.
- virtual bool Open() OVERRIDE;
- virtual void Start(AudioInputCallback* callback) OVERRIDE;
- virtual void Stop() OVERRIDE;
- virtual void Close() OVERRIDE;
- // TODO(henrika): Add volume support using the Audio Mixer API.
- virtual double GetMaxVolume() OVERRIDE;
- virtual void SetVolume(double volume) OVERRIDE;
- virtual double GetVolume() OVERRIDE;
- virtual void SetAutomaticGainControl(bool enabled) OVERRIDE;
- virtual bool GetAutomaticGainControl() OVERRIDE;
-
- private:
- enum State {
- kStateEmpty, // Initial state.
- kStateReady, // Device obtained and ready to record.
- kStateRecording, // Recording audio.
- kStateStopping, // Trying to stop, waiting for callback to finish.
- kStateStopped, // Stopped. Device was reset.
- kStateClosed // Device has been released.
- };
-
- // Allow unit tests to query the device ID.
- friend class AudioInputDeviceTest;
-
- // Windows calls us back with the recorded audio data here. See msdn
- // documentation for 'waveInProc' for details about the parameters.
- static void CALLBACK WaveCallback(HWAVEIN hwi, UINT msg, DWORD_PTR instance,
- DWORD_PTR param1, DWORD_PTR param2);
-
- // If windows reports an error this function handles it and passes it to
- // the attached AudioInputCallback::OnError().
- void HandleError(MMRESULT error);
-
- // Allocates and prepares the memory that will be used for recording.
- void SetupBuffers();
-
- // Deallocates the memory allocated in SetupBuffers.
- void FreeBuffers();
-
- // Sends a buffer to the audio driver for recording.
- void QueueNextPacket(WAVEHDR* buffer);
-
- // Converts the stored device id string into an unsigned integer which
- // can be used by waveInOpen() to open the specified capture device.
- bool GetDeviceId(UINT* device_index);
-
- // Reader beware. Visual C has stronger guarantees on volatile vars than
- // most people expect. In fact, it has release semantics on write and
- // acquire semantics on reads. See the msdn documentation.
- volatile State state_;
-
- // The audio manager that created this input stream. We notify it when
- // we close so it can release its own resources.
- AudioManagerWin* manager_;
-
- // We use the callback mostly to periodically give the recorded audio data.
- AudioInputCallback* callback_;
-
- // The number of buffers of size |buffer_size_| each to use.
- const int num_buffers_;
-
- // The size in bytes of each audio buffer.
- uint32 buffer_size_;
-
- // Channels, 1 or 2.
- const int channels_;
-
- // Contains the unique name of the selected endpoint device.
- // Note that AudioManagerBase::kDefaultDeviceId represents the default
- // device role and is not a valid ID as such.
- std::string device_id_;
-
- // Windows native structure to encode the format parameters.
- WAVEFORMATEX format_;
-
- // Handle to the instance of the wave device.
- HWAVEIN wavein_;
-
- // Pointer to the first allocated audio buffer. This object owns it.
- WAVEHDR* buffer_;
-
- // An event that is signaled when the callback thread is ready to stop.
- base::win::ScopedHandle stopped_event_;
-
- DISALLOW_COPY_AND_ASSIGN(PCMWaveInAudioInputStream);
-};
-
-} // namespace media
-
-#endif // MEDIA_AUDIO_WIN_WAVEIN_INPUT_WIN_H_
diff --git a/src/media/audio/win/waveout_output_win.cc b/src/media/audio/win/waveout_output_win.cc
deleted file mode 100644
index f18b2fa..0000000
--- a/src/media/audio/win/waveout_output_win.cc
+++ /dev/null
@@ -1,422 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "media/audio/win/waveout_output_win.h"
-
-#include <windows.h>
-#include <mmsystem.h>
-#pragma comment(lib, "winmm.lib")
-
-#include "base/basictypes.h"
-#include "base/debug/trace_event.h"
-#include "base/logging.h"
-#include "media/audio/audio_io.h"
-#include "media/audio/audio_util.h"
-#include "media/audio/win/audio_manager_win.h"
-
-namespace media {
-
-// Some general thoughts about the waveOut API, which is badly documented:
-// - We use CALLBACK_EVENT mode in which XP signals events such as buffer
-// releases.
-// - We use RegisterWaitForSingleObject() so one of threads in thread pool
-// automatically calls our callback that feeds more data to Windows.
-// - Windows does not provide a way to query if the device is playing or paused,
-//   thus forcing you to maintain state, which naturally is not exactly
-// synchronized to the actual device state.
-
-// Sixty four MB is the maximum buffer size per AudioOutputStream.
-static const uint32 kMaxOpenBufferSize = 1024 * 1024 * 64;
-
-// See Also
-// http://www.thx.com/consumer/home-entertainment/home-theater/surround-sound-speaker-set-up/
-// http://en.wikipedia.org/wiki/Surround_sound
-
-static const int kMaxChannelsToMask = 8;
-static const unsigned int kChannelsToMask[kMaxChannelsToMask + 1] = {
- 0,
- // 1 = Mono
- SPEAKER_FRONT_CENTER,
- // 2 = Stereo
- SPEAKER_FRONT_LEFT | SPEAKER_FRONT_RIGHT,
- // 3 = Stereo + Center
- SPEAKER_FRONT_LEFT | SPEAKER_FRONT_RIGHT | SPEAKER_FRONT_CENTER,
- // 4 = Quad
- SPEAKER_FRONT_LEFT | SPEAKER_FRONT_RIGHT |
- SPEAKER_BACK_LEFT | SPEAKER_BACK_RIGHT,
- // 5 = 5.0
- SPEAKER_FRONT_LEFT | SPEAKER_FRONT_RIGHT | SPEAKER_FRONT_CENTER |
- SPEAKER_BACK_LEFT | SPEAKER_BACK_RIGHT,
- // 6 = 5.1
- SPEAKER_FRONT_LEFT | SPEAKER_FRONT_RIGHT |
- SPEAKER_FRONT_CENTER | SPEAKER_LOW_FREQUENCY |
- SPEAKER_BACK_LEFT | SPEAKER_BACK_RIGHT,
- // 7 = 6.1
- SPEAKER_FRONT_LEFT | SPEAKER_FRONT_RIGHT |
- SPEAKER_FRONT_CENTER | SPEAKER_LOW_FREQUENCY |
- SPEAKER_BACK_LEFT | SPEAKER_BACK_RIGHT |
- SPEAKER_BACK_CENTER,
- // 8 = 7.1
- SPEAKER_FRONT_LEFT | SPEAKER_FRONT_RIGHT |
- SPEAKER_FRONT_CENTER | SPEAKER_LOW_FREQUENCY |
- SPEAKER_BACK_LEFT | SPEAKER_BACK_RIGHT |
- SPEAKER_SIDE_LEFT | SPEAKER_SIDE_RIGHT
- // TODO(fbarchard): Add additional masks for 7.2 and beyond.
-};
-
-inline size_t PCMWaveOutAudioOutputStream::BufferSize() const {
- // Round size of buffer up to the nearest 16 bytes.
- return (sizeof(WAVEHDR) + buffer_size_ + 15u) & static_cast<size_t>(~15);
-}
-
-inline WAVEHDR* PCMWaveOutAudioOutputStream::GetBuffer(int n) const {
- DCHECK_GE(n, 0);
- DCHECK_LT(n, num_buffers_);
- return reinterpret_cast<WAVEHDR*>(&buffers_[n * BufferSize()]);
-}
-
-PCMWaveOutAudioOutputStream::PCMWaveOutAudioOutputStream(
- AudioManagerWin* manager, const AudioParameters& params, int num_buffers,
- UINT device_id)
- : state_(PCMA_BRAND_NEW),
- manager_(manager),
- device_id_(device_id),
- waveout_(NULL),
- callback_(NULL),
- num_buffers_(num_buffers),
- buffer_size_(params.GetBytesPerBuffer()),
- volume_(1),
- channels_(params.channels()),
- pending_bytes_(0),
- waiting_handle_(NULL),
- audio_bus_(AudioBus::Create(params)) {
- format_.Format.wFormatTag = WAVE_FORMAT_EXTENSIBLE;
- format_.Format.nChannels = params.channels();
- format_.Format.nSamplesPerSec = params.sample_rate();
- format_.Format.wBitsPerSample = params.bits_per_sample();
- format_.Format.cbSize = sizeof(format_) - sizeof(WAVEFORMATEX);
- // The next are computed from above.
- format_.Format.nBlockAlign = (format_.Format.nChannels *
- format_.Format.wBitsPerSample) / 8;
- format_.Format.nAvgBytesPerSec = format_.Format.nBlockAlign *
- format_.Format.nSamplesPerSec;
- if (params.channels() > kMaxChannelsToMask) {
- format_.dwChannelMask = kChannelsToMask[kMaxChannelsToMask];
- } else {
- format_.dwChannelMask = kChannelsToMask[params.channels()];
- }
- format_.SubFormat = KSDATAFORMAT_SUBTYPE_PCM;
- format_.Samples.wValidBitsPerSample = params.bits_per_sample();
-}
-
-PCMWaveOutAudioOutputStream::~PCMWaveOutAudioOutputStream() {
- DCHECK(NULL == waveout_);
-}
-
-bool PCMWaveOutAudioOutputStream::Open() {
- if (state_ != PCMA_BRAND_NEW)
- return false;
- if (BufferSize() * num_buffers_ > kMaxOpenBufferSize)
- return false;
- if (num_buffers_ < 2 || num_buffers_ > 5)
- return false;
-
- // Create buffer event.
- buffer_event_.Set(::CreateEvent(NULL, // Security attributes.
- FALSE, // It will auto-reset.
- FALSE, // Initial state.
- NULL)); // No name.
- if (!buffer_event_.Get())
- return false;
-
- // Open the device.
- // We'll be getting buffer_event_ events when it's time to refill the buffer.
- MMRESULT result = ::waveOutOpen(
- &waveout_,
- device_id_,
- reinterpret_cast<LPCWAVEFORMATEX>(&format_),
- reinterpret_cast<DWORD_PTR>(buffer_event_.Get()),
- NULL,
- CALLBACK_EVENT);
- if (result != MMSYSERR_NOERROR)
- return false;
-
- SetupBuffers();
- state_ = PCMA_READY;
- return true;
-}
-
-void PCMWaveOutAudioOutputStream::SetupBuffers() {
- buffers_.reset(new char[BufferSize() * num_buffers_]);
- for (int ix = 0; ix != num_buffers_; ++ix) {
- WAVEHDR* buffer = GetBuffer(ix);
- buffer->lpData = reinterpret_cast<char*>(buffer) + sizeof(WAVEHDR);
- buffer->dwBufferLength = buffer_size_;
- buffer->dwBytesRecorded = 0;
- buffer->dwFlags = WHDR_DONE;
- buffer->dwLoops = 0;
-    // Tell the Windows sound drivers about our buffers. It is not documented
-    // what this does, but we can guess that it causes the OS to keep a
-    // reference to the memory pages so the driver can use them without worries.
- ::waveOutPrepareHeader(waveout_, buffer, sizeof(WAVEHDR));
- }
-}
-
-void PCMWaveOutAudioOutputStream::FreeBuffers() {
- for (int ix = 0; ix != num_buffers_; ++ix) {
- ::waveOutUnprepareHeader(waveout_, GetBuffer(ix), sizeof(WAVEHDR));
- }
- buffers_.reset(NULL);
-}
-
-// Initially we ask the source to fill up all audio buffers. If we don't do
-// this then we would always get the driver callback when it is about to run
-// samples and that would leave too little time to react.
-void PCMWaveOutAudioOutputStream::Start(AudioSourceCallback* callback) {
- if (state_ != PCMA_READY)
- return;
- callback_ = callback;
-
-  // Reset the buffer event; it can be left in an arbitrary state if we
-  // previously stopped the stream. This can happen because we stop callbacks
-  // before stopping playback itself.
- if (!::ResetEvent(buffer_event_.Get())) {
- HandleError(MMSYSERR_ERROR);
- return;
- }
-
- // Start watching for buffer events.
- if (!::RegisterWaitForSingleObject(&waiting_handle_,
- buffer_event_.Get(),
- &BufferCallback,
- this,
- INFINITE,
- WT_EXECUTEDEFAULT)) {
- HandleError(MMSYSERR_ERROR);
- waiting_handle_ = NULL;
- return;
- }
-
- state_ = PCMA_PLAYING;
-
- // Queue the buffers.
- pending_bytes_ = 0;
- for (int ix = 0; ix != num_buffers_; ++ix) {
- WAVEHDR* buffer = GetBuffer(ix);
- // Caller waits for 1st packet to become available, but not for others,
- // so we wait for them here.
- if (ix != 0)
- callback_->WaitTillDataReady();
- QueueNextPacket(buffer); // Read more data.
- pending_bytes_ += buffer->dwBufferLength;
- }
-
- // From now on |pending_bytes_| would be accessed by callback thread.
- // Most likely waveOutPause() or waveOutRestart() has its own memory barrier,
- // but issuing our own is safer.
- MemoryBarrier();
-
- MMRESULT result = ::waveOutPause(waveout_);
- if (result != MMSYSERR_NOERROR) {
- HandleError(result);
- return;
- }
-
- // Send the buffers to the audio driver. Note that the device is paused
- // so we avoid entering the callback method while still here.
- for (int ix = 0; ix != num_buffers_; ++ix) {
- result = ::waveOutWrite(waveout_, GetBuffer(ix), sizeof(WAVEHDR));
- if (result != MMSYSERR_NOERROR) {
- HandleError(result);
- break;
- }
- }
- result = ::waveOutRestart(waveout_);
- if (result != MMSYSERR_NOERROR) {
- HandleError(result);
- return;
- }
-}
-
-// Stopping is tricky if we want it be fast.
-// For now just do it synchronously and avoid all the complexities.
-// TODO(enal): if we want faster Stop() we can create singleton that keeps track
-// of all currently playing streams. Then you don't have to wait
-// till all callbacks are completed. Of course access to singleton
-// should be under its own lock, and checking the liveness and
-// acquiring the lock on stream should be done atomically.
-void PCMWaveOutAudioOutputStream::Stop() {
- if (state_ != PCMA_PLAYING)
- return;
- state_ = PCMA_STOPPING;
- MemoryBarrier();
-
- // Stop watching for buffer event, wait till all the callbacks are complete.
- // Should be done before ::waveOutReset() call to avoid race condition when
- // callback that is currently active and already checked that stream is still
- // being played calls ::waveOutWrite() after ::waveOutReset() returns, later
- // causing ::waveOutClose() to fail with WAVERR_STILLPLAYING.
- // TODO(enal): that delays actual stopping of playback. Alternative can be
- // to call ::waveOutReset() twice, once before
- // ::UnregisterWaitEx() and once after.
- if (waiting_handle_) {
- if (!::UnregisterWaitEx(waiting_handle_, INVALID_HANDLE_VALUE)) {
- state_ = PCMA_PLAYING;
- HandleError(MMSYSERR_ERROR);
- return;
- }
- waiting_handle_ = NULL;
- }
-
- // Stop playback.
- MMRESULT res = ::waveOutReset(waveout_);
- if (res != MMSYSERR_NOERROR) {
- state_ = PCMA_PLAYING;
- HandleError(res);
- return;
- }
-
- // Wait for lock to ensure all outstanding callbacks have completed.
- base::AutoLock auto_lock(lock_);
-
-  // waveOutReset() leaves buffers in an unpredictable state, causing
- // problems if we want to close, release, or reuse them. Fix the states.
- for (int ix = 0; ix != num_buffers_; ++ix) {
- GetBuffer(ix)->dwFlags = WHDR_PREPARED;
- }
-
- // Don't use callback after Stop().
- callback_ = NULL;
-
- state_ = PCMA_READY;
-}
-
-// We can Close in any state, but closing a stream that is still playing
-// makes Windows generate an error. We cannot propagate it to the source,
-// as callback_ is set to NULL. Just print it and hope somebody somehow
-// will find it...
-void PCMWaveOutAudioOutputStream::Close() {
- Stop(); // Just to be sure. No-op if not playing.
- if (waveout_) {
- MMRESULT result = ::waveOutClose(waveout_);
- // If ::waveOutClose() fails we cannot just delete the stream, callback
- // may try to access it and would crash. Better to leak the stream.
- if (result != MMSYSERR_NOERROR) {
- HandleError(result);
- state_ = PCMA_PLAYING;
- return;
- }
- state_ = PCMA_CLOSED;
- waveout_ = NULL;
- FreeBuffers();
- }
- // Tell the audio manager that we have been released. This can result in
- // the manager destroying us in-place so this needs to be the last thing
- // we do on this function.
- manager_->ReleaseOutputStream(this);
-}
-
-void PCMWaveOutAudioOutputStream::SetVolume(double volume) {
- if (!waveout_)
- return;
- volume_ = static_cast<float>(volume);
-}
-
-void PCMWaveOutAudioOutputStream::GetVolume(double* volume) {
- if (!waveout_)
- return;
- *volume = volume_;
-}
-
-void PCMWaveOutAudioOutputStream::HandleError(MMRESULT error) {
- DLOG(WARNING) << "PCMWaveOutAudio error " << error;
- if (callback_)
- callback_->OnError(this, error);
-}
-
-void PCMWaveOutAudioOutputStream::QueueNextPacket(WAVEHDR *buffer) {
- DCHECK_EQ(channels_, format_.Format.nChannels);
- // Call the source which will fill our buffer with pleasant sounds and
- // return to us how many bytes were used.
- // TODO(fbarchard): Handle used 0 by queueing more.
-
- // HACK: Yield if Read() is called too often. On older platforms which are
- // still using the WaveOut backend, we run into synchronization issues where
- // the renderer has not finished filling the shared memory when Read() is
- // called. Reading too early will lead to clicks and pops. See issues:
- // http://crbug.com/161307 and http://crbug.com/61022
- callback_->WaitTillDataReady();
-
- // TODO(sergeyu): Specify correct hardware delay for AudioBuffersState.
- int frames_filled = callback_->OnMoreData(
- audio_bus_.get(), AudioBuffersState(pending_bytes_, 0));
- uint32 used = frames_filled * audio_bus_->channels() *
- format_.Format.wBitsPerSample / 8;
-
- if (used <= buffer_size_) {
- // Note: If this ever changes to output raw float the data must be clipped
- // and sanitized since it may come from an untrusted source such as NaCl.
- audio_bus_->ToInterleaved(
- frames_filled, format_.Format.wBitsPerSample / 8, buffer->lpData);
-
- buffer->dwBufferLength = used * format_.Format.nChannels / channels_;
- media::AdjustVolume(buffer->lpData, used,
- format_.Format.nChannels,
- format_.Format.wBitsPerSample >> 3,
- volume_);
- } else {
- HandleError(0);
- return;
- }
- buffer->dwFlags = WHDR_PREPARED;
-}
-
-// One of the threads in our thread pool asynchronously calls this function
-// when buffer_event_ is signalled. It searches through all the buffers looking
-// for freed ones, fills them with data, and "feeds" them to Windows.
-// Note: by searching through all the buffers we guarantee that we fill all the
-// buffers, even when "event loss" happens, i.e. if Windows signals the event
-// when it did not flip into the unsignaled state after the previous signal.
-void NTAPI PCMWaveOutAudioOutputStream::BufferCallback(PVOID lpParameter,
- BOOLEAN timer_fired) {
- TRACE_EVENT0("audio", "PCMWaveOutAudioOutputStream::BufferCallback");
-
- DCHECK(!timer_fired);
- PCMWaveOutAudioOutputStream* stream =
- reinterpret_cast<PCMWaveOutAudioOutputStream*>(lpParameter);
-
- // Lock the stream so callbacks do not interfere with each other.
- // Several callbacks can be called simultaneously by different threads in the
- // thread pool if some of the callbacks are slow, or system is very busy and
- // scheduled callbacks are not called on time.
- base::AutoLock auto_lock(stream->lock_);
- if (stream->state_ != PCMA_PLAYING)
- return;
-
- for (int ix = 0; ix != stream->num_buffers_; ++ix) {
- WAVEHDR* buffer = stream->GetBuffer(ix);
- if (buffer->dwFlags & WHDR_DONE) {
- // Before we queue the next packet, we need to adjust the number of
- // pending bytes since the last write to hardware.
- stream->pending_bytes_ -= buffer->dwBufferLength;
- stream->QueueNextPacket(buffer);
-
- // QueueNextPacket() can take a long time, especially if several of them
- // were called back-to-back. Check if we are stopping now.
- if (stream->state_ != PCMA_PLAYING)
- return;
-
- // Time to send the buffer to the audio driver. Since we are reusing
- // the same buffers we can get away without calling waveOutPrepareHeader.
- MMRESULT result = ::waveOutWrite(stream->waveout_,
- buffer,
- sizeof(WAVEHDR));
- if (result != MMSYSERR_NOERROR)
- stream->HandleError(result);
- stream->pending_bytes_ += buffer->dwBufferLength;
- }
- }
-}
-
-} // namespace media
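The "general thoughts" comment near the top of this deleted file describes the event-driven waveOut pattern the implementation relies on. Boiled down to plain Win32, it amounts to the sketch below; all names are local to the sketch and buffer refilling is omitted.

#include <windows.h>
#include <mmsystem.h>
#pragma comment(lib, "winmm.lib")

// A thread-pool thread lands here each time the driver releases a buffer; the
// real code scans for WHDR_DONE headers and calls waveOutWrite() again.
static void NTAPI OnBufferEvent(PVOID /*context*/, BOOLEAN /*timer_fired*/) {}

bool OpenEventDrivenWaveOut(const WAVEFORMATEX* format, HWAVEOUT* waveout,
                            HANDLE* buffer_event, HANDLE* wait_handle) {
  // Auto-reset event that waveOut signals whenever a buffer completes.
  *buffer_event = ::CreateEvent(NULL, FALSE, FALSE, NULL);
  if (!*buffer_event)
    return false;
  if (::waveOutOpen(waveout, WAVE_MAPPER, format,
                    reinterpret_cast<DWORD_PTR>(*buffer_event), NULL,
                    CALLBACK_EVENT) != MMSYSERR_NOERROR)
    return false;
  // Let a thread-pool thread call OnBufferEvent() whenever the event fires.
  return ::RegisterWaitForSingleObject(wait_handle, *buffer_event,
                                       &OnBufferEvent, NULL, INFINITE,
                                       WT_EXECUTEDEFAULT) != 0;
}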
diff --git a/src/media/audio/win/waveout_output_win.h b/src/media/audio/win/waveout_output_win.h
deleted file mode 100644
index a62fcfc..0000000
--- a/src/media/audio/win/waveout_output_win.h
+++ /dev/null
@@ -1,141 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef MEDIA_AUDIO_WIN_WAVEOUT_OUTPUT_WIN_H_
-#define MEDIA_AUDIO_WIN_WAVEOUT_OUTPUT_WIN_H_
-
-#include <windows.h>
-#include <mmsystem.h>
-#include <mmreg.h>
-
-#include "base/basictypes.h"
-#include "base/memory/scoped_ptr.h"
-#include "base/synchronization/lock.h"
-#include "base/win/scoped_handle.h"
-#include "media/audio/audio_io.h"
-#include "media/audio/audio_parameters.h"
-
-namespace media {
-
-class AudioManagerWin;
-
-// Implements PCM audio output support for Windows using the WaveXXX API.
-// While not as nice as the DirectSound-based API, it should work in all target
-// operating systems regardless of the DirectX version installed. It is known
-// that on some machines WaveXXX-based audio is better while on others
-// DirectSound is better.
-//
-// Important: the OnXXXX functions in AudioSourceCallback are called by more
-// than one thread so it is important to have some form of synchronization if
-// you are keeping state in it.
-class PCMWaveOutAudioOutputStream : public AudioOutputStream {
- public:
-  // The ctor takes all the usual parameters, plus |manager|, which is the
-  // audio manager that is creating this object, and |device_id|, which is
-  // provided by the operating system.
- PCMWaveOutAudioOutputStream(AudioManagerWin* manager,
- const AudioParameters& params,
- int num_buffers,
- UINT device_id);
- virtual ~PCMWaveOutAudioOutputStream();
-
- // Implementation of AudioOutputStream.
- virtual bool Open();
- virtual void Close();
- virtual void Start(AudioSourceCallback* callback);
- virtual void Stop();
- virtual void SetVolume(double volume);
- virtual void GetVolume(double* volume);
-
- // Sends a buffer to the audio driver for playback.
- void QueueNextPacket(WAVEHDR* buffer);
-
- private:
- enum State {
- PCMA_BRAND_NEW, // Initial state.
- PCMA_READY, // Device obtained and ready to play.
- PCMA_PLAYING, // Playing audio.
- PCMA_STOPPING, // Audio is stopping, do not "feed" data to Windows.
- PCMA_CLOSED // Device has been released.
- };
-
- // Returns pointer to the n-th buffer.
- inline WAVEHDR* GetBuffer(int n) const;
-
- // Size of one buffer in bytes, rounded up if necessary.
- inline size_t BufferSize() const;
-
-  // Windows calls us back asking for more data when buffer_event_ is signalled.
- // See MSDN for help on RegisterWaitForSingleObject() and waveOutOpen().
- static void NTAPI BufferCallback(PVOID lpParameter, BOOLEAN timer_fired);
-
- // If windows reports an error this function handles it and passes it to
- // the attached AudioSourceCallback::OnError().
- void HandleError(MMRESULT error);
-
- // Allocates and prepares the memory that will be used for playback.
- void SetupBuffers();
-
- // Deallocates the memory allocated in SetupBuffers.
- void FreeBuffers();
-
- // Reader beware. Visual C has stronger guarantees on volatile vars than
- // most people expect. In fact, it has release semantics on write and
- // acquire semantics on reads. See the msdn documentation.
- volatile State state_;
-
- // The audio manager that created this output stream. We notify it when
- // we close so it can release its own resources.
- AudioManagerWin* manager_;
-
- // We use the callback mostly to periodically request more audio data.
- AudioSourceCallback* callback_;
-
- // The number of buffers of size |buffer_size_| each to use.
- const int num_buffers_;
-
- // The size in bytes of each audio buffer, we usually have two of these.
- uint32 buffer_size_;
-
- // Volume level from 0 to 1.
- float volume_;
-
- // Channels from 0 to 8.
- const int channels_;
-
- // Number of bytes yet to be played in the hardware buffer.
- uint32 pending_bytes_;
-
- // The id assigned by the operating system to the selected wave output
- // hardware device. Usually this is just -1 which means 'default device'.
- UINT device_id_;
-
- // Windows native structure to encode the format parameters.
- WAVEFORMATPCMEX format_;
-
- // Handle to the instance of the wave device.
- HWAVEOUT waveout_;
-
- // Handle to the buffer event.
- base::win::ScopedHandle buffer_event_;
-
- // Handle returned by RegisterWaitForSingleObject().
- HANDLE waiting_handle_;
-
- // Pointer to the allocated audio buffers, we allocate all buffers in one big
- // chunk. This object owns them.
- scoped_array<char> buffers_;
-
- // Lock used to avoid the conflict when callbacks are called simultaneously.
- base::Lock lock_;
-
- // Container for retrieving data from AudioSourceCallback::OnMoreData().
- scoped_ptr<AudioBus> audio_bus_;
-
- DISALLOW_COPY_AND_ASSIGN(PCMWaveOutAudioOutputStream);
-};
-
-} // namespace media
-
-#endif // MEDIA_AUDIO_WIN_WAVEOUT_OUTPUT_WIN_H_
diff --git a/src/media/base/audio_bus.cc b/src/media/base/audio_bus.cc
deleted file mode 100644
index adf7de9..0000000
--- a/src/media/base/audio_bus.cc
+++ /dev/null
@@ -1,358 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "media/base/audio_bus.h"
-
-#include <limits>
-
-#include "base/logging.h"
-#include "media/audio/audio_parameters.h"
-#include "media/base/limits.h"
-
-namespace media {
-
-static bool IsAligned(void* ptr) {
- return (reinterpret_cast<uintptr_t>(ptr) &
- (AudioBus::kChannelAlignment - 1)) == 0U;
-}
-
-// Calculates the required size for an AudioBus with the given params, sets
-// |aligned_frames| to the actual frame length of each channel array.
-static int CalculateMemorySizeInternal(int channels, int frames,
- int* out_aligned_frames) {
- // Choose a size such that each channel will be aligned by
- // kChannelAlignment when stored in a contiguous block.
- int aligned_frames =
- ((frames * sizeof(float) + AudioBus::kChannelAlignment - 1) &
- ~(AudioBus::kChannelAlignment - 1)) / sizeof(float);
-
- if (out_aligned_frames)
- *out_aligned_frames = aligned_frames;
-
- return sizeof(float) * channels * aligned_frames;
-}
-
-// |Format| is the destination type, |Fixed| is a type larger than |Format|
-// such that operations can be made without overflowing.
-template<class Format, class Fixed>
-static void FromInterleavedInternal(const void* src, int start_frame,
- int frames, AudioBus* dest) {
- const Format* source = static_cast<const Format*>(src);
-
- static const Fixed kBias = std::numeric_limits<Format>::is_signed ? 0 :
- std::numeric_limits<Format>::max() / 2 + 1;
- static const float kMaxScale = 1.0f / (kBias ? kBias - 1 :
- std::numeric_limits<Format>::max());
- static const float kMinScale = 1.0f / (kBias ? kBias :
- -static_cast<Fixed>(std::numeric_limits<Format>::min()));
-
- int channels = dest->channels();
- for (int ch = 0; ch < channels; ++ch) {
- float* channel_data = dest->channel(ch);
- for (int i = start_frame, offset = ch; i < start_frame + frames;
- ++i, offset += channels) {
- Fixed v = static_cast<Fixed>(source[offset]) - kBias;
- channel_data[i] = v * (v < 0 ? kMinScale : kMaxScale);
- }
- }
-}
-
-// |Format| is the destination type, |Fixed| is a type larger than |Format|
-// such that operations can be made without overflowing.
-template<class Format, class Fixed>
-static void ToInterleavedInternal(const AudioBus* source, int start_frame,
- int frames, void* dst) {
- Format* dest = static_cast<Format*>(dst);
-
- static const Format kBias = std::numeric_limits<Format>::is_signed ? 0 :
- std::numeric_limits<Format>::max() / 2 + 1;
- static const Fixed kMaxValue = kBias ? kBias - 1 :
- std::numeric_limits<Format>::max();
- static const Fixed kMinValue = kBias ? -kBias :
- std::numeric_limits<Format>::min();
-
- int channels = source->channels();
- for (int ch = 0; ch < channels; ++ch) {
- const float* channel_data = source->channel(ch);
-    for (int i = start_frame, offset = ch; i < start_frame + frames;
- ++i, offset += channels) {
- float v = channel_data[i];
- Fixed sample = v * (v < 0 ? -kMinValue : kMaxValue);
-
- if (sample > kMaxValue)
- sample = kMaxValue;
- else if (sample < kMinValue)
- sample = kMinValue;
-
- dest[offset] = static_cast<Format>(sample) + kBias;
- }
- }
-}
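// Worked example of the scaling in the two templates above for the int16 case
// (signed, so kBias == 0): kMaxScale == 1/32767 and kMinScale == 1/32768, so
// FromInterleaved maps 32767 -> 1.0f, -32768 -> -1.0f and 0 -> 0.0f, while
// ToInterleaved maps 1.0f -> 32767 and -1.0f -> -32768, clipping anything
// outside [-1.0f, 1.0f]. For uint8 the bias of 128 shifts the range first, so
// 255 -> 1.0f and 0 -> -1.0f, and the bias is added back when interleaving.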
-
-static void ValidateConfig(int channels, int frames) {
- CHECK_GT(frames, 0);
- CHECK_GT(channels, 0);
- CHECK_LE(channels, limits::kMaxChannels);
-}
-
-static void CheckOverflow(int start_frame, int frames, int total_frames) {
- CHECK_GE(start_frame, 0);
- CHECK_GE(frames, 0);
- CHECK_GT(total_frames, 0);
- int sum = start_frame + frames;
- CHECK_LE(sum, total_frames);
- CHECK_GE(sum, 0);
-}
-
-AudioBus::AudioBus(int channels, int frames)
- : frames_(frames),
- can_set_channel_data_(false) {
- ValidateConfig(channels, frames_);
-
- int aligned_frames = 0;
- int size = CalculateMemorySizeInternal(channels, frames, &aligned_frames);
-
- data_.reset(static_cast<float*>(base::AlignedAlloc(
- size, AudioBus::kChannelAlignment)));
-
- BuildChannelData(channels, aligned_frames, data_.get());
-}
-
-AudioBus::AudioBus(int channels, int frames, float* data)
- : frames_(frames),
- can_set_channel_data_(false) {
- ValidateConfig(channels, frames_);
-
- int aligned_frames = 0;
- CalculateMemorySizeInternal(channels, frames, &aligned_frames);
-
- BuildChannelData(channels, aligned_frames, data);
-}
-
-AudioBus::AudioBus(int frames, const std::vector<float*>& channel_data)
- : channel_data_(channel_data),
- frames_(frames),
- can_set_channel_data_(false) {
- ValidateConfig(channel_data_.size(), frames_);
-
- // Sanity check wrapped vector for alignment and channel count.
- for (size_t i = 0; i < channel_data_.size(); ++i)
- DCHECK(IsAligned(channel_data_[i]));
-}
-
-AudioBus::AudioBus(int channels)
- : channel_data_(channels),
- frames_(0),
- can_set_channel_data_(true) {
- for (size_t i = 0; i < channel_data_.size(); ++i)
- channel_data_[i] = NULL;
-}
-
-AudioBus::~AudioBus() {}
-
-scoped_ptr<AudioBus> AudioBus::Create(int channels, int frames) {
- return scoped_ptr<AudioBus>(new AudioBus(channels, frames));
-}
-
-scoped_ptr<AudioBus> AudioBus::Create(const AudioParameters& params) {
- return scoped_ptr<AudioBus>(new AudioBus(
- params.channels(), params.frames_per_buffer()));
-}
-
-#if defined(__LB_SHELL__) || defined(COBALT)
-
-scoped_ptr<AudioBus> AudioBus::Create(int channels, int frames_per_channel,
- int bytes_per_frame, bool interleaved) {
- // AudioBus treats everything in float so we have to convert.
- uint32 float_frame_per_channel =
- frames_per_channel * bytes_per_frame / sizeof(float);
- if (interleaved)
- return Create(1, channels * float_frame_per_channel);
-
- return Create(channels, float_frame_per_channel);
-}
-
-#endif // defined(__LB_SHELL__) || defined(COBALT)
-
-scoped_ptr<AudioBus> AudioBus::CreateWrapper(int channels) {
- return scoped_ptr<AudioBus>(new AudioBus(channels));
-}
-
-scoped_ptr<AudioBus> AudioBus::WrapVector(
- int frames, const std::vector<float*>& channel_data) {
- return scoped_ptr<AudioBus>(new AudioBus(frames, channel_data));
-}
-
-scoped_ptr<AudioBus> AudioBus::WrapMemory(int channels, int frames,
- void* data) {
- // |data| must be aligned by AudioBus::kChannelAlignment.
- CHECK(IsAligned(data));
- return scoped_ptr<AudioBus>(new AudioBus(
- channels, frames, static_cast<float*>(data)));
-}
-
-scoped_ptr<AudioBus> AudioBus::WrapMemory(const AudioParameters& params,
- void* data) {
- // |data| must be aligned by AudioBus::kChannelAlignment.
- CHECK(IsAligned(data));
- return scoped_ptr<AudioBus>(new AudioBus(
- params.channels(), params.frames_per_buffer(),
- static_cast<float*>(data)));
-}
-
-void AudioBus::SetChannelData(int channel, float* data) {
- CHECK(can_set_channel_data_);
- CHECK_GE(channel, 0);
- CHECK_LT(static_cast<size_t>(channel), channel_data_.size());
- DCHECK(IsAligned(data));
- channel_data_[channel] = data;
-}
-
-void AudioBus::set_frames(int frames) {
- CHECK(can_set_channel_data_);
- frames_ = frames;
-}
-
-void AudioBus::ZeroFramesPartial(int start_frame, int frames) {
- CheckOverflow(start_frame, frames, frames_);
-
- if (frames <= 0)
- return;
-
- for (size_t i = 0; i < channel_data_.size(); ++i) {
- memset(channel_data_[i] + start_frame, 0,
- frames * sizeof(*channel_data_[i]));
- }
-}
-
-void AudioBus::ZeroFrames(int frames) {
- ZeroFramesPartial(0, frames);
-}
-
-void AudioBus::Zero() {
- ZeroFrames(frames_);
-}
-
-int AudioBus::CalculateMemorySize(const AudioParameters& params) {
- return CalculateMemorySizeInternal(
- params.channels(), params.frames_per_buffer(), NULL);
-}
-
-int AudioBus::CalculateMemorySize(int channels, int frames) {
- return CalculateMemorySizeInternal(channels, frames, NULL);
-}
-
-void AudioBus::BuildChannelData(int channels, int aligned_frames, float* data) {
- DCHECK(IsAligned(data));
- DCHECK_EQ(channel_data_.size(), 0U);
-  // Separate audio data out into channels for easy lookup later.
- channel_data_.reserve(channels);
- for (int i = 0; i < channels; ++i)
- channel_data_.push_back(data + i * aligned_frames);
-}
-
-// TODO(dalecurtis): See if intrinsic optimizations help any here.
-void AudioBus::FromInterleavedPartial(const void* source, int start_frame,
- int frames, int bytes_per_sample) {
- CheckOverflow(start_frame, frames, frames_);
- switch (bytes_per_sample) {
- case 1:
- FromInterleavedInternal<uint8, int16>(source, start_frame, frames, this);
- break;
- case 2:
- FromInterleavedInternal<int16, int32>(source, start_frame, frames, this);
- break;
- case 4:
- FromInterleavedInternal<int32, int64>(source, start_frame, frames, this);
- break;
- default:
- NOTREACHED() << "Unsupported bytes per sample encountered.";
- ZeroFramesPartial(start_frame, frames);
- return;
- }
-
- // Don't clear remaining frames if this is a partial deinterleave.
- if (!start_frame) {
- // Zero any remaining frames.
- ZeroFramesPartial(frames, frames_ - frames);
- }
-}
-
-void AudioBus::FromInterleaved(const void* source, int frames,
- int bytes_per_sample) {
- FromInterleavedPartial(source, 0, frames, bytes_per_sample);
-}
-
-void AudioBus::ToInterleaved(int frames, int bytes_per_sample,
- void* dest) const {
- ToInterleavedPartial(0, frames, bytes_per_sample, dest);
-}
-
-// TODO(dalecurtis): See if intrinsic optimizations help any here.
-void AudioBus::ToInterleavedPartial(int start_frame, int frames,
- int bytes_per_sample, void* dest) const {
- CheckOverflow(start_frame, frames, frames_);
- switch (bytes_per_sample) {
- case 1:
- ToInterleavedInternal<uint8, int16>(this, start_frame, frames, dest);
- break;
- case 2:
- ToInterleavedInternal<int16, int32>(this, start_frame, frames, dest);
- break;
- case 4:
- ToInterleavedInternal<int32, int64>(this, start_frame, frames, dest);
- break;
- default:
- NOTREACHED() << "Unsupported bytes per sample encountered.";
- memset(dest, 0, frames * bytes_per_sample);
- return;
- }
-}
-
-#if defined(__LB_SHELL__) || defined(COBALT)
-void AudioBus::FromInterleavedFloat(const float* source, int frames,
- int audio_bus_offset) {
- DCHECK_LE(frames + audio_bus_offset, this->frames());
-
- while (frames > 0) {
- for (int channel = 0; channel < channels(); ++channel) {
- this->channel(channel)[audio_bus_offset] = *source;
- ++source;
- }
- ++audio_bus_offset;
- --frames;
- }
-}
-
-void AudioBus::ToInterleavedFloat(
- int frames, int audio_bus_offset, int extra_channels, float* dest) const {
- DCHECK_LE(frames + audio_bus_offset, this->frames());
- DCHECK_GE(extra_channels, 0);
-
- while (frames > 0) {
- for (int channel = 0; channel < channels(); ++channel) {
- *dest = this->channel(channel)[audio_bus_offset];
- ++dest;
- }
- for (int channel = 0; channel < extra_channels; ++channel) {
- *dest = 0.f;
- ++dest;
- }
- ++audio_bus_offset;
- --frames;
- }
-}
-#endif // defined(__LB_SHELL__) || defined(COBALT)
-
-void AudioBus::CopyTo(AudioBus* dest) const {
- CHECK_EQ(channels(), dest->channels());
- CHECK_EQ(frames(), dest->frames());
-
- // Since we don't know if the other AudioBus is wrapped or not (and we don't
- // want to care), just copy using the public channel() accessors.
- for (int i = 0; i < channels(); ++i)
- memcpy(dest->channel(i), channel(i), sizeof(*channel(i)) * frames());
-}
-
-} // namespace media
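
The switch above only dispatches on |bytes_per_sample|; the scaling itself lives in the FromInterleavedInternal() template, which is not part of this hunk. A rough standalone sketch of the bias/scale step the comments describe, consistent with the int16 test vectors in audio_bus_unittest.cc below (illustrative only, not the deleted implementation):

// Standalone sketch: deinterleave int16 samples into planar float channels,
// scaling into [-1.0f, 1.0f]. The negative and positive halves use different
// divisors so that -32768 and +32767 map exactly onto -1.0 and +1.0.
#include <stdint.h>
#include <vector>

void DeinterleaveInt16ToFloat(const int16_t* interleaved,
                              int frames,
                              int channels,
                              std::vector<std::vector<float> >* planar) {
  planar->assign(channels, std::vector<float>(frames));
  for (int frame = 0; frame < frames; ++frame) {
    for (int ch = 0; ch < channels; ++ch) {
      const int16_t sample = interleaved[frame * channels + ch];
      (*planar)[ch][frame] =
          sample < 0 ? sample / 32768.0f : sample / 32767.0f;
    }
  }
}
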
diff --git a/src/media/base/audio_bus.h b/src/media/base/audio_bus.h
deleted file mode 100644
index 67b4ef6..0000000
--- a/src/media/base/audio_bus.h
+++ /dev/null
@@ -1,165 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef MEDIA_BASE_AUDIO_BUS_H_
-#define MEDIA_BASE_AUDIO_BUS_H_
-
-#include <limits>
-#include <vector>
-
-#include "base/logging.h"
-#include "base/memory/aligned_memory.h"
-#include "base/memory/scoped_ptr.h"
-#include "media/base/media_export.h"
-
-namespace media {
-class AudioParameters;
-
-// Scoped container for "busing" audio channel data around. Each channel is
-// stored in planar format and guaranteed to be aligned by kChannelAlignment.
-// AudioBus objects can be created normally or via wrapping. Normally, AudioBus
-// will dice up a contiguous memory block for channel data. When wrapped,
-// AudioBus instead routes requests for channel data to the wrapped object.
-class MEDIA_EXPORT AudioBus {
- public:
-#if defined(__LB_PS3__)
- // We interleave our data already, but all data is floats, so we require that
- // no frame be smaller than 4 bytes.
- enum { kChannelAlignment = 4 };
-#else
- // Guaranteed alignment of each channel's data; use 16-byte alignment for easy
- // SSE optimizations.
- enum { kChannelAlignment = 16 };
-#endif
-
- // Creates a new AudioBus and allocates |channels| channels of length |frames|
- // each. Uses channels() and frames_per_buffer() from AudioParameters if given.
- static scoped_ptr<AudioBus> Create(int channels, int frames);
- static scoped_ptr<AudioBus> Create(const AudioParameters& params);
-
-#if defined(__LB_SHELL__) || defined(COBALT)
- static scoped_ptr<AudioBus> Create(int channels, int frames_per_channel,
- int bytes_per_frame, bool interleaved);
-#endif // defined(__LB_SHELL__) || defined(COBALT)
-
- // Creates a new AudioBus with the given number of channels, but zero length.
- // It's expected to be used with SetChannelData() and set_frames() to
- // wrap externally allocated memory.
- static scoped_ptr<AudioBus> CreateWrapper(int channels);
-
- // Creates a new AudioBus from an existing channel vector. Does not transfer
- // ownership of |channel_data| to AudioBus; i.e., |channel_data| must outlive
- // the returned AudioBus. Each channel must be aligned by kChannelAlignment.
- static scoped_ptr<AudioBus> WrapVector(
- int frames, const std::vector<float*>& channel_data);
-
- // Creates a new AudioBus by wrapping an existing block of memory. Block must
- // be at least CalculateMemorySize() bytes in size. |data| must outlive the
- // returned AudioBus. |data| must be aligned by kChannelAlignment.
- static scoped_ptr<AudioBus> WrapMemory(int channels, int frames, void* data);
- static scoped_ptr<AudioBus> WrapMemory(const AudioParameters& params,
- void* data);
- // Returns the required memory size to use the WrapMemory() method.
- static int CalculateMemorySize(const AudioParameters& params);
-
- // Calculates the required size for an AudioBus given the number of channels
- // and frames.
- static int CalculateMemorySize(int channels, int frames);
-
- // Helper methods for converting an AudioBus from and to interleaved integer
- // data. Expects interleaving to be [ch0, ch1, ..., chN, ch0, ch1, ...] with
- // |bytes_per_sample| per value. Values are scaled and bias corrected during
- // conversion. ToInterleaved() will also clip values to format range.
- // Handles uint8, int16, and int32 currently. FromInterleaved() will zero out
- // any unfilled frames when |frames| is less than frames().
- void FromInterleaved(const void* source, int frames, int bytes_per_sample);
- void ToInterleaved(int frames, int bytes_per_sample, void* dest) const;
- void ToInterleavedPartial(int start_frame, int frames, int bytes_per_sample,
- void* dest) const;
-
-#if defined(__LB_SHELL__) || defined(COBALT)
- // The following two functions work on float samples instead of integer
- // samples.
- // FromInterleavedFloat fills the audio bus with interleaved samples. It is
- // possible to fill frames in the middle of the audio bus by using a non-zero
- // "audio_bus_offset". Note that it will not fill the rest samples with 0.
- // "frames" indicates frame count per channel instead of the combined frames.
- void FromInterleavedFloat(const float* source, int frames,
- int audio_bus_offset);
- // ToInterleavedFloat will interleave data from the audio bus and store them
- // into dest.
- // "frames" indicates frame count per channel instead of the combined frames.
- // It is an error if the requested frame count is larger than what the audio
- // bus can offer.
- // "extra_channels" has to be greater than or equal to 0. A non-zero value
- // indicates that there are more channels in the "dest" than in this audio bus
- // and they will be filled with 0.
- void ToInterleavedFloat(int frames, int audio_bus_offset, int extra_channels,
- float* dest) const;
-#endif // defined(__LB_SHELL__) || defined(COBALT)
-
- // Similar to FromInterleaved() above, but meant for streaming sources. Does
- // not zero out remaining frames; the caller is responsible for doing so using
- // ZeroFramesPartial(). Frames are deinterleaved from the start of |source|
- // to channel(x)[start_frame].
- void FromInterleavedPartial(const void* source, int start_frame, int frames,
- int bytes_per_sample);
-
- // Helper method for copying channel data from one AudioBus to another. Both
- // AudioBus objects must have the same frames() and channels().
- void CopyTo(AudioBus* dest) const;
-
- // Returns a raw pointer to the requested channel. Pointer is guaranteed to
- // have a 16-byte alignment. Warning: Do not rely on the channel data holding
- // sane values; entries may be inf, NaN, or outside [-1.0, 1.0].
- float* channel(int channel) {
- return channel_data_[static_cast<size_t>(channel)];
- }
- const float* channel(int channel) const {
- return channel_data_[static_cast<size_t>(channel)];
- }
- void SetChannelData(int channel, float* data);
-
- int channels() const {
- DCHECK_LE(channel_data_.size(),
- static_cast<size_t>(std::numeric_limits<int>::max()));
- return static_cast<int>(channel_data_.size());
- }
-
- int frames() const { return frames_; }
- void set_frames(int frames);
-
- // Helper method for zeroing out all channels of audio data.
- void Zero();
- void ZeroFrames(int frames);
- void ZeroFramesPartial(int start_frame, int frames);
-
- private:
- friend class scoped_ptr<AudioBus>;
- ~AudioBus();
-
- AudioBus(int channels, int frames);
- AudioBus(int channels, int frames, float* data);
- AudioBus(int frames, const std::vector<float*>& channel_data);
- explicit AudioBus(int channels);
-
- // Helper method for building |channel_data_| from a block of memory. |data|
- // must be at least BlockSize() bytes in size.
- void BuildChannelData(int channels, int aligned_frames, float* data);
-
- // Contiguous block of channel memory.
- scoped_ptr_malloc<float, base::ScopedPtrAlignedFree> data_;
-
- std::vector<float*> channel_data_;
- int frames_;
-
- // Protect SetChannelData() and set_frames() for use by CreateWrapper().
- bool can_set_channel_data_;
-
- DISALLOW_COPY_AND_ASSIGN(AudioBus);
-};
-
-} // namespace media
-
-#endif // MEDIA_BASE_AUDIO_BUS_H_
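
The class comment, together with CalculateMemorySize(), WrapMemory() and kChannelAlignment, implies a simple planar layout: each channel's float block is padded up to a kChannelAlignment boundary so that every channel pointer stays aligned. A rough standalone sketch of that arithmetic, mirroring what BuildChannelData() is documented to do (the deleted CalculateMemorySizeInternal() remains the authoritative version; names here are illustrative):

// Standalone sketch of the aligned planar layout described above. Details
// such as overflow checks in the real implementation are omitted.
#include <stddef.h>
#include <vector>

namespace {

const int kChannelAlignmentSketch = 16;  // Matches the non-PS3 value above.

// Round the per-channel byte count up so every channel starts aligned.
int AlignedChannelBytes(int frames) {
  const int bytes = frames * static_cast<int>(sizeof(float));
  return (bytes + kChannelAlignmentSketch - 1) & ~(kChannelAlignmentSketch - 1);
}

// Rough equivalent of CalculateMemorySize(channels, frames).
int CalculateMemorySizeSketch(int channels, int frames) {
  return channels * AlignedChannelBytes(frames);
}

// Partition one contiguous aligned block into per-channel pointers, as
// BuildChannelData() does.
void BuildChannelPointers(int channels, int frames, float* block,
                          std::vector<float*>* channel_data) {
  const int aligned_frames =
      AlignedChannelBytes(frames) / static_cast<int>(sizeof(float));
  channel_data->clear();
  channel_data->reserve(channels);
  for (int ch = 0; ch < channels; ++ch)
    channel_data->push_back(block + ch * aligned_frames);
}

}  // namespace
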
diff --git a/src/media/base/audio_bus_unittest.cc b/src/media/base/audio_bus_unittest.cc
deleted file mode 100644
index 7b88019..0000000
--- a/src/media/base/audio_bus_unittest.cc
+++ /dev/null
@@ -1,359 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include <limits>
-
-#include "base/stringprintf.h"
-#include "base/time.h"
-#include "media/audio/audio_parameters.h"
-#include "media/base/audio_bus.h"
-#include "media/base/channel_layout.h"
-#include "testing/gtest/include/gtest/gtest.h"
-
-namespace media {
-
-static const int kChannels = 6;
-static const ChannelLayout kChannelLayout = CHANNEL_LAYOUT_5_1;
-// Use a buffer size which is intentionally not a multiple of kChannelAlignment.
-static const int kFrameCount = media::AudioBus::kChannelAlignment * 32 - 1;
-static const int kSampleRate = 48000;
-
-class AudioBusTest : public testing::Test {
- public:
- AudioBusTest() {}
- ~AudioBusTest() {
- for (size_t i = 0; i < data_.size(); ++i)
- base::AlignedFree(data_[i]);
- }
-
- // Validate parameters returned by AudioBus vs. the constructed parameters.
- void VerifyParams(AudioBus* bus) {
- EXPECT_EQ(kChannels, bus->channels());
- EXPECT_EQ(kFrameCount, bus->frames());
- }
-
- void VerifyValue(const float data[], int size, float value) {
- for (int i = 0; i < size; ++i)
- ASSERT_FLOAT_EQ(value, data[i]) << "i=" << i;
- }
-
- // Verify values for each channel in |result| against |expected|.
- void VerifyBus(const AudioBus* result, const AudioBus* expected) {
- ASSERT_EQ(expected->channels(), result->channels());
- ASSERT_EQ(expected->frames(), result->frames());
- for (int ch = 0; ch < result->channels(); ++ch) {
- for (int i = 0; i < result->frames(); ++i) {
- SCOPED_TRACE(base::StringPrintf("ch=%d, i=%d", ch, i));
- ASSERT_FLOAT_EQ(expected->channel(ch)[i], result->channel(ch)[i]);
- }
- }
- }
-
- // Read and write to the full extent of the allocated channel data. Also test
- // the Zero() method and verify it works as advertised. Also test that the
- // data is 16-byte aligned as advertised (see kChannelAlignment in
- // audio_bus.h).
- void VerifyChannelData(AudioBus* bus) {
- for (int i = 0; i < bus->channels(); ++i) {
- ASSERT_EQ(0U, reinterpret_cast<uintptr_t>(
- bus->channel(i)) & (AudioBus::kChannelAlignment - 1));
- std::fill(bus->channel(i), bus->channel(i) + bus->frames(), i);
- }
-
- for (int i = 0; i < bus->channels(); ++i)
- VerifyValue(bus->channel(i), bus->frames(), i);
-
- bus->Zero();
- for (int i = 0; i < bus->channels(); ++i)
- VerifyValue(bus->channel(i), bus->frames(), 0);
- }
-
- // Verify copying to and from |bus1| and |bus2|.
- void CopyTest(AudioBus* bus1, AudioBus* bus2) {
- // Fill |bus1| with dummy data.
- for (int i = 0; i < bus1->channels(); ++i)
- std::fill(bus1->channel(i), bus1->channel(i) + bus1->frames(), i);
-
- // Verify copy from |bus1| to |bus2|.
- bus2->Zero();
- bus1->CopyTo(bus2);
- VerifyBus(bus1, bus2);
-
- // Verify copy from |bus2| to |bus1|.
- bus1->Zero();
- bus2->CopyTo(bus1);
- VerifyBus(bus2, bus1);
- }
-
- protected:
- std::vector<float*> data_;
-
- DISALLOW_COPY_AND_ASSIGN(AudioBusTest);
-};
-
-// Verify basic Create(...) method works as advertised.
-TEST_F(AudioBusTest, Create) {
- scoped_ptr<AudioBus> bus = AudioBus::Create(kChannels, kFrameCount);
- VerifyParams(bus.get());
- VerifyChannelData(bus.get());
-}
-
-// Verify Create(...) using AudioParameters works as advertised.
-TEST_F(AudioBusTest, CreateUsingAudioParameters) {
- scoped_ptr<AudioBus> bus = AudioBus::Create(AudioParameters(
- AudioParameters::AUDIO_PCM_LINEAR, kChannelLayout, kSampleRate, 32,
- kFrameCount));
- VerifyParams(bus.get());
- VerifyChannelData(bus.get());
-}
-
-// Verify an AudioBus created via wrapping a vector works as advertised.
-TEST_F(AudioBusTest, WrapVector) {
- data_.reserve(kChannels);
- for (int i = 0; i < kChannels; ++i) {
- data_.push_back(static_cast<float*>(base::AlignedAlloc(
- sizeof(*data_[i]) * kFrameCount, AudioBus::kChannelAlignment)));
- }
-
- scoped_ptr<AudioBus> bus = AudioBus::WrapVector(kFrameCount, data_);
- VerifyParams(bus.get());
- VerifyChannelData(bus.get());
-}
-
-// Verify an AudioBus created via wrapping a memory block works as advertised.
-TEST_F(AudioBusTest, WrapMemory) {
- AudioParameters params(
- AudioParameters::AUDIO_PCM_LINEAR, kChannelLayout, kSampleRate, 32,
- kFrameCount);
- int data_size = AudioBus::CalculateMemorySize(params);
- scoped_ptr_malloc<float, base::ScopedPtrAlignedFree> data(static_cast<float*>(
- base::AlignedAlloc(data_size, AudioBus::kChannelAlignment)));
-
- // Fill the memory with a test value we can check for after wrapping.
- static const float kTestValue = 3;
- std::fill(
- data.get(), data.get() + data_size / sizeof(*data.get()), kTestValue);
-
- scoped_ptr<AudioBus> bus = AudioBus::WrapMemory(params, data.get());
- // Verify the test value we filled prior to wrapping.
- for (int i = 0; i < bus->channels(); ++i)
- VerifyValue(bus->channel(i), bus->frames(), kTestValue);
- VerifyParams(bus.get());
- VerifyChannelData(bus.get());
-
- // Verify the channel vectors lie within the provided memory block.
- EXPECT_GE(bus->channel(0), data.get());
- EXPECT_LE(bus->channel(bus->channels() - 1) + bus->frames(),
- data.get() + data_size / sizeof(*data.get()));
-}
-
-// Simulate a shared memory transfer and verify results.
-TEST_F(AudioBusTest, CopyTo) {
- // Create one bus with AudioParameters and the other through direct values to
- // test for parity between the Create() functions.
- AudioParameters params(
- AudioParameters::AUDIO_PCM_LINEAR, kChannelLayout, kSampleRate, 32,
- kFrameCount);
- scoped_ptr<AudioBus> bus1 = AudioBus::Create(kChannels, kFrameCount);
- scoped_ptr<AudioBus> bus2 = AudioBus::Create(params);
-
- {
- SCOPED_TRACE("Created");
- CopyTest(bus1.get(), bus2.get());
- }
- {
- SCOPED_TRACE("Wrapped Vector");
- // Try a copy to an AudioBus wrapping a vector.
- data_.reserve(kChannels);
- for (int i = 0; i < kChannels; ++i) {
- data_.push_back(static_cast<float*>(base::AlignedAlloc(
- sizeof(*data_[i]) * kFrameCount, AudioBus::kChannelAlignment)));
- }
-
- bus2 = AudioBus::WrapVector(kFrameCount, data_);
- CopyTest(bus1.get(), bus2.get());
- }
- {
- SCOPED_TRACE("Wrapped Memory");
- // Try a copy to an AudioBus wrapping a memory block.
- scoped_ptr_malloc<float, base::ScopedPtrAlignedFree> data(
- static_cast<float*>(base::AlignedAlloc(
- AudioBus::CalculateMemorySize(params),
- AudioBus::kChannelAlignment)));
-
- bus2 = AudioBus::WrapMemory(params, data.get());
- CopyTest(bus1.get(), bus2.get());
- }
-}
-
-// Verify Zero() and ZeroFrames(...) utility methods work as advertised.
-TEST_F(AudioBusTest, Zero) {
- scoped_ptr<AudioBus> bus = AudioBus::Create(kChannels, kFrameCount);
-
- // Fill the bus with dummy data.
- for (int i = 0; i < bus->channels(); ++i)
- std::fill(bus->channel(i), bus->channel(i) + bus->frames(), i + 1);
-
- // Zero the first half of the frames of each channel.
- bus->ZeroFrames(kFrameCount / 2);
- for (int i = 0; i < bus->channels(); ++i) {
- SCOPED_TRACE("First Half Zero");
- VerifyValue(bus->channel(i), kFrameCount / 2, 0);
- VerifyValue(bus->channel(i) + kFrameCount / 2,
- kFrameCount - kFrameCount / 2, i + 1);
- }
-
- // Fill the bus with dummy data.
- for (int i = 0; i < bus->channels(); ++i)
- std::fill(bus->channel(i), bus->channel(i) + bus->frames(), i + 1);
-
- // Zero the last half of the frames.
- bus->ZeroFramesPartial(kFrameCount / 2, kFrameCount - kFrameCount / 2);
- for (int i = 0; i < bus->channels(); ++i) {
- SCOPED_TRACE("Last Half Zero");
- VerifyValue(bus->channel(i) + kFrameCount / 2,
- kFrameCount - kFrameCount / 2, 0);
- VerifyValue(bus->channel(i), kFrameCount / 2, i + 1);
- }
-
- // Fill the bus with dummy data.
- for (int i = 0; i < bus->channels(); ++i)
- std::fill(bus->channel(i), bus->channel(i) + bus->frames(), i + 1);
-
- // Zero all the frames of each channel.
- bus->Zero();
- for (int i = 0; i < bus->channels(); ++i) {
- SCOPED_TRACE("All Zero");
- VerifyValue(bus->channel(i), bus->frames(), 0);
- }
-}
-
-// Each test vector represents two channels of data in the following arbitrary
-// layout: <min, zero, max, min, zero, max, zero, zero>.
-static const int kTestVectorSize = 8;
-static const uint8 kTestVectorUint8[kTestVectorSize] = {
- 0, -kint8min, kuint8max, 0, -kint8min, kuint8max, -kint8min, -kint8min };
-static const int16 kTestVectorInt16[kTestVectorSize] = {
- kint16min, 0, kint16max, kint16min, 0, kint16max, 0, 0 };
-static const int32 kTestVectorInt32[kTestVectorSize] = {
- kint32min, 0, kint32max, kint32min, 0, kint32max, 0, 0 };
-
-// Expected results.
-static const int kTestVectorFrames = kTestVectorSize / 2;
-static const float kTestVectorResult[][kTestVectorFrames] = {
- { -1, 1, 0, 0 }, { 0, -1, 1, 0 }};
-static const int kTestVectorChannels = arraysize(kTestVectorResult);
-
-// Verify FromInterleaved() deinterleaves audio in supported formats correctly.
-TEST_F(AudioBusTest, FromInterleaved) {
- scoped_ptr<AudioBus> bus = AudioBus::Create(
- kTestVectorChannels, kTestVectorFrames);
- scoped_ptr<AudioBus> expected = AudioBus::Create(
- kTestVectorChannels, kTestVectorFrames);
- for (int ch = 0; ch < kTestVectorChannels; ++ch) {
- memcpy(expected->channel(ch), kTestVectorResult[ch],
- kTestVectorFrames * sizeof(*expected->channel(ch)));
- }
- {
- SCOPED_TRACE("uint8");
- bus->Zero();
- bus->FromInterleaved(
- kTestVectorUint8, kTestVectorFrames, sizeof(*kTestVectorUint8));
- VerifyBus(bus.get(), expected.get());
- }
- {
- SCOPED_TRACE("int16");
- bus->Zero();
- bus->FromInterleaved(
- kTestVectorInt16, kTestVectorFrames, sizeof(*kTestVectorInt16));
- VerifyBus(bus.get(), expected.get());
- }
- {
- SCOPED_TRACE("int32");
- bus->Zero();
- bus->FromInterleaved(
- kTestVectorInt32, kTestVectorFrames, sizeof(*kTestVectorInt32));
- VerifyBus(bus.get(), expected.get());
- }
-}
-
-// Verify FromInterleavedPartial() deinterleaves audio correctly.
-TEST_F(AudioBusTest, FromInterleavedPartial) {
- // Only deinterleave the middle two frames in each channel.
- static const int kPartialStart = 1;
- static const int kPartialFrames = 2;
- ASSERT_LE(kPartialStart + kPartialFrames, kTestVectorFrames);
-
- scoped_ptr<AudioBus> bus = AudioBus::Create(
- kTestVectorChannels, kTestVectorFrames);
- scoped_ptr<AudioBus> expected = AudioBus::Create(
- kTestVectorChannels, kTestVectorFrames);
- expected->Zero();
- for (int ch = 0; ch < kTestVectorChannels; ++ch) {
- memcpy(expected->channel(ch) + kPartialStart,
- kTestVectorResult[ch] + kPartialStart,
- kPartialFrames * sizeof(*expected->channel(ch)));
- }
-
- bus->Zero();
- bus->FromInterleavedPartial(
- kTestVectorInt32 + kPartialStart * bus->channels(), kPartialStart,
- kPartialFrames, sizeof(*kTestVectorInt32));
- VerifyBus(bus.get(), expected.get());
-}
-
-// Verify ToInterleaved() interleaves audio in supported formats correctly.
-TEST_F(AudioBusTest, ToInterleaved) {
- scoped_ptr<AudioBus> bus = AudioBus::Create(
- kTestVectorChannels, kTestVectorFrames);
- // Fill the bus with our test vector.
- for (int ch = 0; ch < kTestVectorChannels; ++ch) {
- memcpy(bus->channel(ch), kTestVectorResult[ch],
- kTestVectorFrames * sizeof(*bus->channel(ch)));
- }
- {
- SCOPED_TRACE("uint8");
- uint8 test_array[arraysize(kTestVectorUint8)];
- bus->ToInterleaved(bus->frames(), sizeof(*kTestVectorUint8), test_array);
- ASSERT_EQ(memcmp(
- test_array, kTestVectorUint8, arraysize(kTestVectorUint8)), 0);
- }
- {
- SCOPED_TRACE("int16");
- int16 test_array[arraysize(kTestVectorInt16)];
- bus->ToInterleaved(bus->frames(), sizeof(*kTestVectorInt16), test_array);
- ASSERT_EQ(memcmp(
- test_array, kTestVectorInt16, arraysize(kTestVectorInt16)), 0);
- }
- {
- SCOPED_TRACE("int32");
- int32 test_array[arraysize(kTestVectorInt32)];
- bus->ToInterleaved(bus->frames(), sizeof(*kTestVectorInt32), test_array);
- ASSERT_EQ(memcmp(
- test_array, kTestVectorInt32, arraysize(kTestVectorInt32)), 0);
- }
-}
-
-// Verify ToInterleavedPartial() interleaves audio correctly.
-TEST_F(AudioBusTest, ToInterleavedPartial) {
- // Only interleave the middle two frames in each channel.
- static const int kPartialStart = 1;
- static const int kPartialFrames = 2;
- ASSERT_LE(kPartialStart + kPartialFrames, kTestVectorFrames);
-
- scoped_ptr<AudioBus> expected = AudioBus::Create(
- kTestVectorChannels, kTestVectorFrames);
- for (int ch = 0; ch < kTestVectorChannels; ++ch) {
- memcpy(expected->channel(ch), kTestVectorResult[ch],
- kTestVectorFrames * sizeof(*expected->channel(ch)));
- }
-
- int16 test_array[arraysize(kTestVectorInt16)];
- expected->ToInterleavedPartial(
- kPartialStart, kPartialFrames, sizeof(*kTestVectorInt16), test_array);
- ASSERT_EQ(memcmp(
- test_array, kTestVectorInt16 + kPartialStart * kTestVectorChannels,
- kPartialFrames * sizeof(*kTestVectorInt16)), 0);
-}
-
-} // namespace media
diff --git a/src/media/base/audio_capturer_source.h b/src/media/base/audio_capturer_source.h
deleted file mode 100644
index 7611025..0000000
--- a/src/media/base/audio_capturer_source.h
+++ /dev/null
@@ -1,81 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef MEDIA_BASE_AUDIO_CAPTURER_SOURCE_H_
-#define MEDIA_BASE_AUDIO_CAPTURER_SOURCE_H_
-
-#include <vector>
-#include "base/basictypes.h"
-#include "base/memory/ref_counted.h"
-#include "media/audio/audio_parameters.h"
-#include "media/base/audio_bus.h"
-#include "media/base/media_export.h"
-
-namespace media {
-
-// AudioCapturerSource is an interface representing the source for
-// captured audio. An implementation will periodically call Capture() on a
-// callback object.
-class AudioCapturerSource
- : public base::RefCountedThreadSafe<media::AudioCapturerSource> {
- public:
- class CaptureCallback {
- public:
- // Callback to deliver the captured data from the OS.
- virtual void Capture(AudioBus* audio_source,
- int audio_delay_milliseconds,
- double volume) = 0;
-
- // Signals an error has occurred.
- virtual void OnCaptureError() = 0;
-
- protected:
- virtual ~CaptureCallback() {}
- };
-
- class CaptureEventHandler {
- public:
- // Notification to the client that the device with the specific |device_id|
- // has been started.
- virtual void OnDeviceStarted(const std::string& device_id) = 0;
-
- // Notification to the client that the device has been stopped.
- virtual void OnDeviceStopped() = 0;
-
- protected:
- virtual ~CaptureEventHandler() {}
- };
-
- // Sets information about the audio stream format and the device
- // to be used. It must be called before any of the other methods.
- // TODO(xians): Add |device_id| to this Initialize() function.
- virtual void Initialize(const AudioParameters& params,
- CaptureCallback* callback,
- CaptureEventHandler* event_handler) = 0;
-
- // Starts the audio recording.
- virtual void Start() = 0;
-
- // Stops the audio recording. This API is synchronous, and no more data
- // callbacks will be passed to the client after it is called.
- virtual void Stop() = 0;
-
- // Sets the capture volume, with range [0.0, 1.0] inclusive.
- virtual void SetVolume(double volume) = 0;
-
- // Specifies the |session_id| to query which device to use.
- // TODO(xians): Change the interface to SetDevice(const std::string&).
- virtual void SetDevice(int session_id) = 0;
-
- // Enables or disables the WebRtc AGC control.
- virtual void SetAutomaticGainControl(bool enable) = 0;
-
- protected:
- friend class base::RefCountedThreadSafe<AudioCapturerSource>;
- virtual ~AudioCapturerSource() {}
-};
-
-} // namespace media
-
-#endif // MEDIA_BASE_AUDIO_CAPTURER_SOURCE_H_
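
Since no unit test accompanies this interface, a minimal client sketch may help show how the two nested callback interfaces are meant to be implemented. It assumes the header above is still available on an include path and uses purely illustrative names; it is not code from the Cobalt tree:

// Minimal sketch of an AudioCapturerSource client (illustrative only).
#include <string>

#include "base/logging.h"
#include "media/base/audio_capturer_source.h"

class LoggingCaptureClient
    : public media::AudioCapturerSource::CaptureCallback,
      public media::AudioCapturerSource::CaptureEventHandler {
 public:
  // CaptureCallback: invoked periodically with planar float data.
  virtual void Capture(media::AudioBus* audio_source,
                       int audio_delay_milliseconds,
                       double volume) {
    DVLOG(1) << "Captured " << audio_source->frames() << " frames, delay "
             << audio_delay_milliseconds << " ms, volume " << volume;
  }
  virtual void OnCaptureError() { LOG(ERROR) << "Capture error."; }

  // CaptureEventHandler.
  virtual void OnDeviceStarted(const std::string& device_id) {
    DVLOG(1) << "Capture device started: " << device_id;
  }
  virtual void OnDeviceStopped() { DVLOG(1) << "Capture device stopped."; }
};
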
diff --git a/src/media/base/audio_decoder.cc b/src/media/base/audio_decoder.cc
deleted file mode 100644
index 9390660..0000000
--- a/src/media/base/audio_decoder.cc
+++ /dev/null
@@ -1,13 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "media/base/audio_decoder.h"
-
-namespace media {
-
-AudioDecoder::AudioDecoder() {}
-
-AudioDecoder::~AudioDecoder() {}
-
-} // namespace media
diff --git a/src/media/base/audio_decoder.h b/src/media/base/audio_decoder.h
deleted file mode 100644
index 36cfe4a..0000000
--- a/src/media/base/audio_decoder.h
+++ /dev/null
@@ -1,80 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef MEDIA_BASE_AUDIO_DECODER_H_
-#define MEDIA_BASE_AUDIO_DECODER_H_
-
-#include <vector>
-
-#include "base/callback.h"
-#include "base/memory/ref_counted.h"
-#include "media/base/channel_layout.h"
-#include "media/base/pipeline_status.h"
-#include "media/base/media_export.h"
-
-namespace media {
-
-class Buffer;
-class DemuxerStream;
-
-class MEDIA_EXPORT AudioDecoder
- : public base::RefCountedThreadSafe<AudioDecoder> {
- public:
- // Status codes for read operations.
- enum Status {
- kOk,
- kAborted,
- kDecodeError,
- };
- typedef std::vector<scoped_refptr<Buffer> > Buffers;
-
- // Initialize an AudioDecoder with the given DemuxerStream, executing the
- // callback upon completion.
- // statistics_cb is used to update global pipeline statistics.
- virtual void Initialize(const scoped_refptr<DemuxerStream>& stream,
- const PipelineStatusCB& status_cb,
- const StatisticsCB& statistics_cb) = 0;
-
- // Request samples to be decoded and returned via the provided callback.
- // Only one read may be in flight at any given time.
- //
- // Implementations guarantee that the callback will not be called from within
- // this method.
- //
- // Non-NULL sample buffer pointers will contain decoded audio data or may
- // indicate the end of the stream. A NULL buffer pointer indicates an aborted
- // Read(). This can happen if the DemuxerStream gets flushed and doesn't have
- // any more data to return.
-#if !defined(COBALT) && !defined(__LB_SHELL__)
- typedef base::Callback<void(Status, const scoped_refptr<Buffer>&)> ReadCB;
-#else // !defined(COBALT) && !defined(__LB_SHELL__)
- // This is a variant of the above callback that can pass multiple decoded
- // audio buffers at once. Non-empty sample buffers will contain decoded audio
- // data or may indicate the end of the stream. Empty buffers indicate an
- // aborted Read(). This can happen if the DemuxerStream gets flushed and
- // doesn't have any more data to return.
- typedef base::Callback<void(Status, const Buffers&)> ReadCB;
-#endif // !defined(COBALT) && !defined(__LB_SHELL__)
- virtual void Read(const ReadCB& read_cb) = 0;
-
- // Reset decoder state, dropping any queued encoded data.
- virtual void Reset(const base::Closure& closure) = 0;
-
- // Returns various information about the decoded audio format.
- virtual int bits_per_channel() = 0;
- virtual ChannelLayout channel_layout() = 0;
- virtual int samples_per_second() = 0;
-
- protected:
- friend class base::RefCountedThreadSafe<AudioDecoder>;
- virtual ~AudioDecoder();
- AudioDecoder();
-
- private:
- DISALLOW_COPY_AND_ASSIGN(AudioDecoder);
-};
-
-} // namespace media
-
-#endif // MEDIA_BASE_AUDIO_DECODER_H_
diff --git a/src/media/base/audio_decoder_config.cc b/src/media/base/audio_decoder_config.cc
deleted file mode 100644
index 8d335db..0000000
--- a/src/media/base/audio_decoder_config.cc
+++ /dev/null
@@ -1,161 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "media/base/audio_decoder_config.h"
-
-#include <sstream>
-
-#include "base/logging.h"
-#include "base/metrics/histogram.h"
-#include "media/audio/sample_rates.h"
-#include "media/base/limits.h"
-
-namespace media {
-
-AudioDecoderConfig::AudioDecoderConfig()
- : codec_(kUnknownAudioCodec),
- bits_per_channel_(0),
- channel_layout_(CHANNEL_LAYOUT_UNSUPPORTED),
- samples_per_second_(0),
- bytes_per_frame_(0),
- extra_data_size_(0),
- is_encrypted_(false) {
-}
-
-AudioDecoderConfig::AudioDecoderConfig(AudioCodec codec,
- int bits_per_channel,
- ChannelLayout channel_layout,
- int samples_per_second,
- const uint8* extra_data,
- size_t extra_data_size,
- bool is_encrypted) {
- Initialize(codec, bits_per_channel, channel_layout, samples_per_second,
- extra_data, extra_data_size, is_encrypted, true);
-}
-
-void AudioDecoderConfig::Initialize(AudioCodec codec,
- int bits_per_channel,
- ChannelLayout channel_layout,
- int samples_per_second,
- const uint8* extra_data,
- size_t extra_data_size,
- bool is_encrypted,
- bool record_stats) {
- CHECK((extra_data_size != 0) == (extra_data != NULL));
-
- if (record_stats) {
- UMA_HISTOGRAM_ENUMERATION("Media.AudioCodec", codec, kAudioCodecMax + 1);
- // Fake enum histogram to get exact integral buckets. Expect to never see
- // any values over 32 and even that is huge.
- UMA_HISTOGRAM_ENUMERATION("Media.AudioBitsPerChannel", bits_per_channel,
- 40);
- UMA_HISTOGRAM_ENUMERATION("Media.AudioChannelLayout", channel_layout,
- CHANNEL_LAYOUT_MAX);
- AudioSampleRate asr = media::AsAudioSampleRate(samples_per_second);
- if (asr != kUnexpectedAudioSampleRate) {
- UMA_HISTOGRAM_ENUMERATION("Media.AudioSamplesPerSecond", asr,
- kUnexpectedAudioSampleRate);
- } else {
- UMA_HISTOGRAM_COUNTS(
- "Media.AudioSamplesPerSecondUnexpected", samples_per_second);
- }
- }
-
- codec_ = codec;
- bits_per_channel_ = bits_per_channel;
- channel_layout_ = channel_layout;
- samples_per_second_ = samples_per_second;
- extra_data_size_ = extra_data_size;
-
- if (extra_data_size_ > 0) {
- extra_data_.reset(new uint8[extra_data_size_]);
- memcpy(extra_data_.get(), extra_data, extra_data_size_);
- } else {
- extra_data_.reset();
- }
-
- is_encrypted_ = is_encrypted;
-
- int channels = ChannelLayoutToChannelCount(channel_layout_);
- bytes_per_frame_ = channels * bits_per_channel_ / 8;
-}
-
-AudioDecoderConfig::~AudioDecoderConfig() {}
-
-bool AudioDecoderConfig::IsValidConfig() const {
- return codec_ != kUnknownAudioCodec &&
- channel_layout_ != CHANNEL_LAYOUT_UNSUPPORTED &&
- bits_per_channel_ > 0 &&
- bits_per_channel_ <= limits::kMaxBitsPerSample &&
- samples_per_second_ > 0 &&
- samples_per_second_ <= limits::kMaxSampleRate;
-}
-
-bool AudioDecoderConfig::Matches(const AudioDecoderConfig& config) const {
- return ((codec() == config.codec()) &&
- (bits_per_channel() == config.bits_per_channel()) &&
- (channel_layout() == config.channel_layout()) &&
- (samples_per_second() == config.samples_per_second()) &&
- (extra_data_size() == config.extra_data_size()) &&
- (!extra_data() || !memcmp(extra_data(), config.extra_data(),
- extra_data_size())) &&
- (is_encrypted() == config.is_encrypted()));
-}
-
-std::string AudioDecoderConfig::AsHumanReadableString() const {
- std::ostringstream s;
- s << "codec: " << codec()
- << " bits per channel: " << bits_per_channel()
- << " channel layout: " << channel_layout()
- << " samples per second: " << samples_per_second()
- << " bytes per frame: " << bytes_per_frame()
- << " has extra data? " << (extra_data() ? "true" : "false")
- << " encrypted? " << (is_encrypted() ? "true" : "false");
- return s.str();
-}
-
-void AudioDecoderConfig::CopyFrom(const AudioDecoderConfig& audio_config) {
- Initialize(audio_config.codec(),
- audio_config.bits_per_channel(),
- audio_config.channel_layout(),
- audio_config.samples_per_second(),
- audio_config.extra_data(),
- audio_config.extra_data_size(),
- audio_config.is_encrypted(),
- false);
-}
-
-AudioCodec AudioDecoderConfig::codec() const {
- return codec_;
-}
-
-int AudioDecoderConfig::bits_per_channel() const {
- return bits_per_channel_;
-}
-
-ChannelLayout AudioDecoderConfig::channel_layout() const {
- return channel_layout_;
-}
-
-int AudioDecoderConfig::samples_per_second() const {
- return samples_per_second_;
-}
-
-int AudioDecoderConfig::bytes_per_frame() const {
- return bytes_per_frame_;
-}
-
-uint8* AudioDecoderConfig::extra_data() const {
- return extra_data_.get();
-}
-
-size_t AudioDecoderConfig::extra_data_size() const {
- return extra_data_size_;
-}
-
-bool AudioDecoderConfig::is_encrypted() const {
- return is_encrypted_;
-}
-
-} // namespace media
diff --git a/src/media/base/audio_decoder_config.h b/src/media/base/audio_decoder_config.h
deleted file mode 100644
index dd6a11b..0000000
--- a/src/media/base/audio_decoder_config.h
+++ /dev/null
@@ -1,115 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef MEDIA_BASE_AUDIO_DECODER_CONFIG_H_
-#define MEDIA_BASE_AUDIO_DECODER_CONFIG_H_
-
-#include <string>
-
-#include "base/basictypes.h"
-#include "base/memory/scoped_ptr.h"
-#include "media/base/channel_layout.h"
-#include "media/base/media_export.h"
-
-namespace media {
-
-enum AudioCodec {
- // These values are histogrammed over time; do not change their ordinal
- // values. When deleting a codec replace it with a dummy value; when adding a
- // codec, do so at the bottom (and update kAudioCodecMax).
- kUnknownAudioCodec = 0,
- kCodecAAC,
- kCodecMP3,
- kCodecPCM,
- kCodecVorbis,
- kCodecFLAC,
- kCodecAMR_NB,
- kCodecAMR_WB,
- kCodecPCM_MULAW,
- kCodecGSM_MS,
- kCodecPCM_S16BE,
- kCodecPCM_S24BE,
- kCodecOpus,
- // DO NOT ADD RANDOM AUDIO CODECS!
- //
- // The only acceptable time to add a new codec is if there is production code
- // that uses said codec in the same CL.
-
- kAudioCodecMax = kCodecOpus // Must equal the last "real" codec above.
-};
-
-// TODO(dalecurtis): FFmpeg API uses |bytes_per_channel| instead of
-// |bits_per_channel|; we should switch over since bits are generally confusing
-// to work with.
-class MEDIA_EXPORT AudioDecoderConfig {
- public:
- // Constructs an uninitialized object. Clients should call Initialize() with
- // appropriate values before using.
- AudioDecoderConfig();
-
- // Constructs an initialized object. It is acceptable to pass in NULL for
- // |extra_data|; otherwise the memory is copied.
- AudioDecoderConfig(AudioCodec codec, int bits_per_channel,
- ChannelLayout channel_layout, int samples_per_second,
- const uint8* extra_data, size_t extra_data_size,
- bool is_encrypted);
-
- ~AudioDecoderConfig();
-
- // Resets the internal state of this object.
- void Initialize(AudioCodec codec, int bits_per_channel,
- ChannelLayout channel_layout, int samples_per_second,
- const uint8* extra_data, size_t extra_data_size,
- bool is_encrypted,
- bool record_stats);
-
- // Deep copies |audio_config|.
- void CopyFrom(const AudioDecoderConfig& audio_config);
-
- // Returns true if this object has appropriate configuration values, false
- // otherwise.
- bool IsValidConfig() const;
-
- // Returns true if all fields in |config| match this config.
- // Note: The contents of |extra_data_| are compared, not the raw pointers.
- bool Matches(const AudioDecoderConfig& config) const;
-
- // Returns a human-readable string describing |*this|. For debugging & test
- // output only.
- std::string AsHumanReadableString() const;
-
- AudioCodec codec() const;
- int bits_per_channel() const;
- ChannelLayout channel_layout() const;
- int samples_per_second() const;
- int bytes_per_frame() const;
-
- // Optional byte data required to initialize audio decoders such as Vorbis
- // codebooks.
- uint8* extra_data() const;
- size_t extra_data_size() const;
-
- // Whether the audio stream is potentially encrypted.
- // Note that in a potentially encrypted audio stream, individual buffers
- // can be encrypted or not encrypted.
- bool is_encrypted() const;
-
- private:
- AudioCodec codec_;
- int bits_per_channel_;
- ChannelLayout channel_layout_;
- int samples_per_second_;
- int bytes_per_frame_;
-
- scoped_array<uint8> extra_data_;
- size_t extra_data_size_;
-
- bool is_encrypted_;
-
- DISALLOW_COPY_AND_ASSIGN(AudioDecoderConfig);
-};
-
-} // namespace media
-
-#endif // MEDIA_BASE_AUDIO_DECODER_CONFIG_H_
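
For orientation, a hypothetical caller might build and validate a plain PCM configuration as follows; the values are illustrative and the sketch assumes the header above is on the include path:

// Hypothetical usage sketch of AudioDecoderConfig (illustrative values).
#include "media/base/audio_decoder_config.h"
#include "media/base/channel_layout.h"

bool MakeStereoPcmConfig(media::AudioDecoderConfig* config) {
  config->Initialize(media::kCodecPCM,
                     16,                            // bits_per_channel
                     media::CHANNEL_LAYOUT_STEREO,  // two channels
                     44100,                         // samples_per_second
                     NULL, 0,                       // no codec extra data
                     false,                         // not encrypted
                     false);                        // skip UMA stats
  // bytes_per_frame() is now 2 channels * 16 bits / 8 == 4 bytes.
  return config->IsValidConfig();
}
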
diff --git a/src/media/base/audio_fifo.cc b/src/media/base/audio_fifo.cc
deleted file mode 100644
index b6e8f80..0000000
--- a/src/media/base/audio_fifo.cc
+++ /dev/null
@@ -1,144 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "media/base/audio_fifo.h"
-
-#include "base/logging.h"
-
-using base::subtle::Atomic32;
-using base::subtle::NoBarrier_Store;
-
-namespace media {
-
-// Given the current position in the FIFO, the maximum number of elements in
-// the FIFO and the size of the input, this method provides two output
-// results: |size| and |wrap_size|. These two results can then be used for
-// memcpy operations to and from the FIFO.
-// Under "normal" circumstances, |size| will be equal to |in_size| and
-// |wrap_size| will be zero. This corresponds to the non-wrapping case where
-// we have not yet reached the "edge" of the FIFO. If |pos| + |in_size|
-// exceeds the total size of the FIFO, we must wrap around and start reusing
-// part of the allocated memory. The size of this part is given by
-// |wrap_size|.
-static void GetSizes(
- int pos, int max_size, int in_size, int* size, int* wrap_size) {
- if (pos + in_size > max_size) {
- // Wrapping is required => derive size of each segment.
- *size = max_size - pos;
- *wrap_size = in_size - *size;
- } else {
- // Wrapping is not required.
- *size = in_size;
- *wrap_size = 0;
- }
-}
-
-// Updates the read/write position with |step| modulo the maximum number of
-// elements in the FIFO to ensure that the position counters wrap around at
-// the endpoint.
-static int UpdatePos(int pos, int step, int max_size) {
- return ((pos + step) % max_size);
-}
-
-AudioFifo::AudioFifo(int channels, int frames)
- : audio_bus_(AudioBus::Create(channels, frames)),
- max_frames_(frames),
- frames_pushed_(0),
- frames_consumed_(0),
- read_pos_(0),
- write_pos_(0) {}
-
-AudioFifo::~AudioFifo() {}
-
-int AudioFifo::frames() const {
- int delta = frames_pushed_ - frames_consumed_;
- base::subtle::MemoryBarrier();
- return delta;
-}
-
-void AudioFifo::Push(const AudioBus* source) {
- DCHECK(source);
- DCHECK_EQ(source->channels(), audio_bus_->channels());
-
- // Ensure that there is space for the new data in the FIFO.
- const int source_size = source->frames();
- CHECK_LE(source_size + frames(), max_frames_);
-
- // Figure out if wrapping is needed and if so what segment sizes we need
- // when adding the new audio bus content to the FIFO.
- int append_size = 0;
- int wrap_size = 0;
- GetSizes(write_pos_, max_frames(), source_size, &append_size, &wrap_size);
-
- // Copy all channels from the source to the FIFO. Wrap around if needed.
- for (int ch = 0; ch < source->channels(); ++ch) {
- float* dest = audio_bus_->channel(ch);
- const float* src = source->channel(ch);
-
- // Append part of (or the complete) source to the FIFO.
- memcpy(&dest[write_pos_], &src[0], append_size * sizeof(src[0]));
- if (wrap_size > 0) {
- // Wrapping is needed: copy remaining part from the source to the FIFO.
- memcpy(&dest[0], &src[append_size], wrap_size * sizeof(src[0]));
- }
- }
-
- // Ensure the data is *really* written before updating |frames_pushed_|.
- base::subtle::MemoryBarrier();
-
- Atomic32 new_frames_pushed = frames_pushed_ + source_size;
- NoBarrier_Store(&frames_pushed_, new_frames_pushed);
-
- DCHECK_LE(frames(), max_frames());
- write_pos_ = UpdatePos(write_pos_, source_size, max_frames());
-}
-
-void AudioFifo::Consume(AudioBus* destination,
- int start_frame,
- int frames_to_consume) {
- DCHECK(destination);
- DCHECK_EQ(destination->channels(), audio_bus_->channels());
-
- // It is not possible to ask for more data than what is available in the FIFO.
- CHECK_LE(frames_to_consume, frames());
-
- // A copy from the FIFO to |destination| will only be performed if the
- // allocated memory in |destination| is sufficient.
- CHECK_LE(frames_to_consume + start_frame, destination->frames());
-
- // Figure out if wrapping is needed and if so what segment sizes we need
- // when removing audio bus content from the FIFO.
- int consume_size = 0;
- int wrap_size = 0;
- GetSizes(read_pos_, max_frames(), frames_to_consume,
- &consume_size, &wrap_size);
-
- // For all channels, remove the requested amount of data from the FIFO
- // and copy the content to the destination. Wrap around if needed.
- for (int ch = 0; ch < destination->channels(); ++ch) {
- float* dest = destination->channel(ch);
- const float* src = audio_bus_->channel(ch);
-
- // Copy a selected part of the FIFO to the destination.
- memcpy(&dest[start_frame], &src[read_pos_], consume_size * sizeof(src[0]));
- if (wrap_size > 0) {
- // Wrapping is needed: copy remaining part to the destination.
- memcpy(&dest[consume_size + start_frame], &src[0],
- wrap_size * sizeof(src[0]));
- }
- }
-
- Atomic32 new_frames_consumed = frames_consumed_ + frames_to_consume;
- NoBarrier_Store(&frames_consumed_, new_frames_consumed);
-
- read_pos_ = UpdatePos(read_pos_, frames_to_consume, max_frames());
-}
-
-void AudioFifo::Clear() {
- frames_pushed_ = 0;
- frames_consumed_ = 0;
- read_pos_ = 0;
- write_pos_ = 0;
-}
-
-} // namespace media
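
A concrete trace of the wrap-around arithmetic may make GetSizes() and UpdatePos() easier to follow; the numbers below are illustrative only:

// Worked example of the ring-buffer wrap logic above (illustrative values).
#include <assert.h>

int main() {
  const int max_size = 8;   // FIFO capacity in frames.
  const int write_pos = 6;  // Current write position.
  const int in_size = 5;    // Frames in the incoming bus.
  // GetSizes(6, 8, 5, ...): 6 + 5 > 8, so wrapping is required.
  const int size = max_size - write_pos;  // 2 frames land at positions 6..7.
  const int wrap_size = in_size - size;   // 3 frames wrap to positions 0..2.
  assert(size == 2 && wrap_size == 3);
  // UpdatePos(6, 5, 8): write position after the push.
  assert((write_pos + in_size) % max_size == 3);
  return 0;
}
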
diff --git a/src/media/base/audio_fifo.h b/src/media/base/audio_fifo.h
deleted file mode 100644
index e978ace..0000000
--- a/src/media/base/audio_fifo.h
+++ /dev/null
@@ -1,68 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef MEDIA_BASE_AUDIO_FIFO_H_
-#define MEDIA_BASE_AUDIO_FIFO_H_
-
-#include "base/atomicops.h"
-#include "media/base/audio_bus.h"
-#include "media/base/media_export.h"
-
-namespace media {
-
-// First-in first-out container for AudioBus elements.
-// The maximum number of audio frames in the FIFO is set at construction and
-// can not be extended dynamically. The allocated memory is utilized as a
-// ring buffer.
-// This class is thread-safe in the limited sense that one thread may call
-// Push(), while a second thread calls Consume().
-class MEDIA_EXPORT AudioFifo {
- public:
- // Creates a new AudioFifo and allocates |channels| channels of length |frames|.
- AudioFifo(int channels, int frames);
- virtual ~AudioFifo();
-
- // Pushes all audio channel data from |source| to the FIFO.
- // Push() will crash if the allocated space is insufficient.
- void Push(const AudioBus* source);
-
- // Consumes |frames_to_consume| audio frames from the FIFO and copies
- // them to |destination| starting at position |start_frame|.
- // Consume() will crash if the FIFO does not contain |frames_to_consume|
- // frames or if there is insufficient space in |destination| to store the
- // frames.
- void Consume(AudioBus* destination, int start_frame, int frames_to_consume);
-
- // Empties the FIFO without deallocating any memory.
- void Clear();
-
- // Number of actual audio frames in the FIFO.
- int frames() const;
-
- int max_frames() const { return max_frames_; }
-
- private:
- // The actual FIFO is an audio bus implemented as a ring buffer.
- scoped_ptr<AudioBus> audio_bus_;
-
- // Maximum number of elements the FIFO can contain.
- // This value is set by |frames| in the constructor.
- const int max_frames_;
-
- // Number of actual elements in the FIFO.
- volatile base::subtle::Atomic32 frames_pushed_;
- volatile base::subtle::Atomic32 frames_consumed_;
-
- // Current read position.
- int read_pos_;
-
- // Current write position.
- int write_pos_;
-
- DISALLOW_COPY_AND_ASSIGN(AudioFifo);
-};
-
-} // namespace media
-
-#endif // MEDIA_BASE_AUDIO_FIFO_H_
diff --git a/src/media/base/audio_fifo_unittest.cc b/src/media/base/audio_fifo_unittest.cc
deleted file mode 100644
index dd5ffd9..0000000
--- a/src/media/base/audio_fifo_unittest.cc
+++ /dev/null
@@ -1,194 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// TODO(henrika): add test which included |start_frame| in Consume() call.
-
-#include "media/base/audio_fifo.h"
-#include "testing/gtest/include/gtest/gtest.h"
-
-namespace media {
-
-class AudioFifoTest : public testing::Test {
- public:
- AudioFifoTest() {}
- ~AudioFifoTest() {}
-
- void VerifyValue(const float data[], int size, float value) {
- for (int i = 0; i < size; ++i)
- ASSERT_FLOAT_EQ(value, data[i]) << "i=" << i;
- }
-
- protected:
- DISALLOW_COPY_AND_ASSIGN(AudioFifoTest);
-};
-
-// Verify that construction works as intended.
-TEST_F(AudioFifoTest, Construct) {
- static const int kChannels = 6;
- static const int kMaxFrameCount = 128;
- AudioFifo fifo(kChannels, kMaxFrameCount);
- EXPECT_EQ(fifo.frames(), 0);
-}
-
-// Pushes audio bus objects to a FIFO and fills it up to different degrees.
-TEST_F(AudioFifoTest, Push) {
- static const int kChannels = 2;
- static const int kMaxFrameCount = 128;
- AudioFifo fifo(kChannels, kMaxFrameCount);
- {
- SCOPED_TRACE("Push 50%");
- scoped_ptr<AudioBus> bus = AudioBus::Create(kChannels, kMaxFrameCount / 2);
- EXPECT_EQ(fifo.frames(), 0);
- fifo.Push(bus.get());
- EXPECT_EQ(fifo.frames(), bus->frames());
- fifo.Clear();
- }
- {
- SCOPED_TRACE("Push 100%");
- scoped_ptr<AudioBus> bus = AudioBus::Create(kChannels, kMaxFrameCount);
- EXPECT_EQ(fifo.frames(), 0);
- fifo.Push(bus.get());
- EXPECT_EQ(fifo.frames(), bus->frames());
- fifo.Clear();
- }
-}
-
-// Consumes audio bus objects from a FIFO and empties it to different degrees.
-TEST_F(AudioFifoTest, Consume) {
- static const int kChannels = 2;
- static const int kMaxFrameCount = 128;
- AudioFifo fifo(kChannels, kMaxFrameCount);
- {
- scoped_ptr<AudioBus> bus = AudioBus::Create(kChannels, kMaxFrameCount);
- fifo.Push(bus.get());
- EXPECT_EQ(fifo.frames(), kMaxFrameCount);
- }
- {
- SCOPED_TRACE("Consume 50%");
- scoped_ptr<AudioBus> bus = AudioBus::Create(kChannels, kMaxFrameCount / 2);
- fifo.Consume(bus.get(), 0, bus->frames());
- EXPECT_TRUE(fifo.frames() == bus->frames());
- fifo.Push(bus.get());
- EXPECT_EQ(fifo.frames(), kMaxFrameCount);
- }
- {
- SCOPED_TRACE("Consume 100%");
- scoped_ptr<AudioBus> bus = AudioBus::Create(kChannels, kMaxFrameCount);
- fifo.Consume(bus.get(), 0, bus->frames());
- EXPECT_EQ(fifo.frames(), 0);
- fifo.Push(bus.get());
- EXPECT_EQ(fifo.frames(), kMaxFrameCount);
- }
-}
-
-// Verify that the frames() method of the FIFO works as intended while
-// appending and removing audio bus elements to/from the FIFO.
-TEST_F(AudioFifoTest, FramesInFifo) {
- static const int kChannels = 2;
- static const int kMaxFrameCount = 64;
- AudioFifo fifo(kChannels, kMaxFrameCount);
-
- // Fill up the FIFO and verify that the size grows as it should while adding
- // one audio frame each time.
- scoped_ptr<AudioBus> bus = AudioBus::Create(kChannels, 1);
- int n = 0;
- while (fifo.frames() < kMaxFrameCount) {
- fifo.Push(bus.get());
- EXPECT_EQ(fifo.frames(), ++n);
- }
- EXPECT_EQ(fifo.frames(), kMaxFrameCount);
-
- // Empty the FIFO and verify that the size decreases as it should.
- // Reduce the size of the FIFO by one frame each time.
- while (fifo.frames() > 0) {
- fifo.Consume(bus.get(), 0, bus->frames());
- EXPECT_EQ(fifo.frames(), --n);
- }
- EXPECT_EQ(fifo.frames(), 0);
-
- // Verify that a steady-state size of #frames in the FIFO is maintained
- // during a sequence of Push/Consume calls which involves wrapping. We ensure
- // wrapping by selecting a buffer size which does not divide the FIFO size
- // evenly.
- scoped_ptr<AudioBus> bus2 =
- AudioBus::Create(kChannels, (kMaxFrameCount / 4) - 1);
- const int frames_in_fifo = bus2->frames();
- fifo.Push(bus2.get());
- EXPECT_EQ(fifo.frames(), frames_in_fifo);
- for (int n = 0; n < kMaxFrameCount; ++n) {
- fifo.Push(bus2.get());
- fifo.Consume(bus2.get(), 0, frames_in_fifo);
- EXPECT_EQ(fifo.frames(), frames_in_fifo);
- }
-}
-
-// Perform a sequence of Push/Consume calls and verify that the data written
-// to the FIFO is correctly retrieved, i.e., that the order is correct and the
-// values are correct.
-TEST_F(AudioFifoTest, VerifyDataValues) {
- static const int kChannels = 2;
- static const int kFrameCount = 2;
- static const int kFifoFrameCount = 5 * kFrameCount;
-
- AudioFifo fifo(kChannels, kFifoFrameCount);
- scoped_ptr<AudioBus> bus = AudioBus::Create(kChannels, kFrameCount);
- EXPECT_EQ(fifo.frames(), 0);
- EXPECT_EQ(bus->frames(), kFrameCount);
-
- // Start by filling up the FIFO with audio frames. The first audio frame
- // will contain all 1's, the second all 2's etc. All channels contain the
- // same value.
- int value = 1;
- while (fifo.frames() < kFifoFrameCount) {
- for (int j = 0; j < bus->channels(); ++j)
- std::fill(bus->channel(j), bus->channel(j) + bus->frames(), value);
- fifo.Push(bus.get());
- EXPECT_EQ(fifo.frames(), bus->frames() * value);
- ++value;
- }
-
- // FIFO should be full now.
- EXPECT_EQ(fifo.frames(), kFifoFrameCount);
-
- // Consume all audio frames in the FIFO and verify that the stored values
- // are correct. In this example, we shall read out: 1, 2, 3, 4, 5 in that
- // order. Note that we set |frames_to_consume| to half the size of the bus.
- // It means that we shall read out the same value two times in a row.
- value = 1;
- int n = 1;
- const int frames_to_consume = bus->frames() / 2;
- while (fifo.frames() > 0) {
- fifo.Consume(bus.get(), 0, frames_to_consume);
- for (int j = 0; j < bus->channels(); ++j)
- VerifyValue(bus->channel(j), frames_to_consume, value);
- if (n++ % 2 == 0)
- ++value; // counts 1, 1, 2, 2, 3, 3,...
- }
-
- // FIFO should be empty now.
- EXPECT_EQ(fifo.frames(), 0);
-
- // Push one audio bus to the FIFO and fill it with 1's.
- value = 1;
- for (int j = 0; j < bus->channels(); ++j)
- std::fill(bus->channel(j), bus->channel(j) + bus->frames(), value);
- fifo.Push(bus.get());
- EXPECT_EQ(fifo.frames(), bus->frames());
-
- // Keep calling Consume/Push a few rounds and verify that we read out the
- // correct values. The number of elements shall be fixed (kFrameCount) during
- // this phase.
- for (int i = 0; i < 5 * kFifoFrameCount; i++) {
- fifo.Consume(bus.get(), 0, bus->frames());
- for (int j = 0; j < bus->channels(); ++j) {
- VerifyValue(bus->channel(j), bus->channels(), value);
- std::fill(bus->channel(j), bus->channel(j) + bus->frames(), value + 1);
- }
- fifo.Push(bus.get());
- EXPECT_EQ(fifo.frames(), bus->frames());
- ++value;
- }
-}
-
-} // namespace media
diff --git a/src/media/base/audio_pull_fifo.cc b/src/media/base/audio_pull_fifo.cc
deleted file mode 100644
index 4943591..0000000
--- a/src/media/base/audio_pull_fifo.cc
+++ /dev/null
@@ -1,59 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "media/base/audio_pull_fifo.h"
-
-#include <algorithm>
-
-#include "base/logging.h"
-
-namespace media {
-
-AudioPullFifo::AudioPullFifo(int channels, int frames, const ReadCB& read_cb)
- : read_cb_(read_cb) {
- fifo_.reset(new AudioFifo(channels, frames));
- bus_ = AudioBus::Create(channels, frames);
-}
-
-AudioPullFifo::~AudioPullFifo() {
- read_cb_.Reset();
-}
-
-void AudioPullFifo::Consume(AudioBus* destination, int frames_to_consume) {
- DCHECK(destination);
- DCHECK_LE(frames_to_consume, destination->frames());
-
- int write_pos = 0;
- int remaining_frames_to_provide = frames_to_consume;
-
- // Try to fulfill the request using what's available in the FIFO.
- ReadFromFifo(destination, &remaining_frames_to_provide, &write_pos);
-
- // Get the remaining audio frames from the producer using the callback.
- while (remaining_frames_to_provide > 0) {
- // Fill up the FIFO by acquiring audio data from the producer.
- read_cb_.Run(write_pos, bus_.get());
- fifo_->Push(bus_.get());
-
- // Try to fulfill the request using what's available in the FIFO.
- ReadFromFifo(destination, &remaining_frames_to_provide, &write_pos);
- }
-}
-
-void AudioPullFifo::Clear() {
- fifo_->Clear();
-}
-
-void AudioPullFifo::ReadFromFifo(AudioBus* destination,
- int* frames_to_provide,
- int* write_pos) {
- DCHECK(frames_to_provide);
- DCHECK(write_pos);
- int frames = std::min(fifo_->frames(), *frames_to_provide);
- fifo_->Consume(destination, *write_pos, frames);
- *write_pos += frames;
- *frames_to_provide -= frames;
-}
-
-} // namespace media
diff --git a/src/media/base/audio_pull_fifo.h b/src/media/base/audio_pull_fifo.h
deleted file mode 100644
index caf73e4..0000000
--- a/src/media/base/audio_pull_fifo.h
+++ /dev/null
@@ -1,63 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef MEDIA_BASE_AUDIO_PULL_FIFO_H_
-#define MEDIA_BASE_AUDIO_PULL_FIFO_H_
-
-#include "base/callback.h"
-#include "media/base/audio_fifo.h"
-#include "media/base/media_export.h"
-
-namespace media {
-
-// A FIFO (First In First Out) buffer to handle mismatches in buffer sizes
-// between a producer and consumer. The consumer will pull data from this FIFO.
-// If data is already available in the FIFO, it is provided to the consumer.
-// If insufficient data is available to satisfy the request, the FIFO will ask
-// the producer for more data to fulfill a request.
-class MEDIA_EXPORT AudioPullFifo {
- public:
- // Callback type for providing more data into the FIFO. Expects AudioBus
- // to be completely filled with data upon return; zero padded if not enough
- // frames are available to satisfy the request. |frame_delay| is the number
- // of output frames already processed and can be used to estimate delay.
- typedef base::Callback<void(int frame_delay, AudioBus* audio_bus)> ReadCB;
-
- // Constructs an AudioPullFifo with the specified |read_cb|, which is used to
- // read audio data to the FIFO if data is not already available. The internal
- // FIFO can contain |channel| number of channels, where each channel is of
- // length |frames| audio frames.
- AudioPullFifo(int channels, int frames, const ReadCB& read_cb);
- virtual ~AudioPullFifo();
-
- // Consumes |frames_to_consume| audio frames from the FIFO and copies
- // them to |destination|. If the FIFO does not have enough data, we ask
- // the producer to give us more data to fulfill the request using the
- // ReadCB implementation.
- void Consume(AudioBus* destination, int frames_to_consume);
-
- // Empties the FIFO without deallocating any memory.
- void Clear();
-
- private:
- // Attempt to fulfill the request using what is available in the FIFO.
- // Append new data to the |destination| starting at |write_pos|.
- void ReadFromFifo(
- AudioBus* destination, int* frames_to_provide, int* write_pos);
-
- // Source of data to the FIFO.
- ReadCB read_cb_;
-
- // The actual FIFO.
- scoped_ptr<AudioFifo> fifo_;
-
- // Temporary audio bus to hold the data from the producer.
- scoped_ptr<AudioBus> bus_;
-
- DISALLOW_COPY_AND_ASSIGN(AudioPullFifo);
-};
-
-} // namespace media
-
-#endif // MEDIA_BASE_AUDIO_PULL_FIFO_H_
diff --git a/src/media/base/audio_pull_fifo_unittest.cc b/src/media/base/audio_pull_fifo_unittest.cc
deleted file mode 100644
index cec4d35..0000000
--- a/src/media/base/audio_pull_fifo_unittest.cc
+++ /dev/null
@@ -1,95 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/bind.h"
-#include "base/bind_helpers.h"
-#include "base/stringprintf.h"
-#include "media/base/audio_pull_fifo.h"
-#include "testing/gtest/include/gtest/gtest.h"
-
-namespace media {
-
-// Block diagram of a possible real-world usage:
-//
-// | Producer | ----> | AudioPullFifo | ----> | Consumer |
-// push pull
-// 2048 ----> (2048) ----> ~512
-
-// Number of channels in each audio bus.
-static int kChannels = 2;
-
-// Max number of audio frames the FIFO can contain.
-static const int kMaxFramesInFifo = 2048;
-
-class AudioPullFifoTest
- : public testing::TestWithParam<int> {
- public:
- AudioPullFifoTest()
- : pull_fifo_(kChannels, kMaxFramesInFifo, base::Bind(
- &AudioPullFifoTest::ProvideInput, base::Unretained(this))),
- audio_bus_(AudioBus::Create(kChannels, kMaxFramesInFifo)),
- fill_value_(0),
- last_frame_delay_(-1) {}
- virtual ~AudioPullFifoTest() {}
-
- void VerifyValue(const float data[], int size, float start_value) {
- float value = start_value;
- for (int i = 0; i < size; ++i) {
- ASSERT_FLOAT_EQ(value++, data[i]) << "i=" << i;
- }
- }
-
- // Consume data using different sizes, acquire audio frames from the FIFO,
- // and verify that the retrieved values match the values written by the
- // producer.
- void ConsumeTest(int frames_to_consume) {
- int start_value = 0;
- SCOPED_TRACE(base::StringPrintf("Checking frames_to_consume %d",
- frames_to_consume));
- pull_fifo_.Consume(audio_bus_.get(), frames_to_consume);
- for (int j = 0; j < kChannels; ++j) {
- VerifyValue(audio_bus_->channel(j), frames_to_consume, start_value);
- }
- start_value += frames_to_consume;
- EXPECT_LT(last_frame_delay_, audio_bus_->frames());
- }
-
- // AudioPullFifo::ReadCB implementation where we increase a value for each
- // audio frame that we provide. Note that all channels are given the same
- // value to simplify the verification.
- virtual void ProvideInput(int frame_delay, AudioBus* audio_bus) {
- ASSERT_GT(frame_delay, last_frame_delay_);
- last_frame_delay_ = frame_delay;
-
- EXPECT_EQ(audio_bus->channels(), audio_bus_->channels());
- EXPECT_EQ(audio_bus->frames(), kMaxFramesInFifo);
- for (int i = 0; i < audio_bus->frames(); ++i) {
- for (int j = 0; j < audio_bus->channels(); ++j) {
- // Store same value in all channels.
- audio_bus->channel(j)[i] = fill_value_;
- }
- fill_value_++;
- }
- }
-
- protected:
- AudioPullFifo pull_fifo_;
- scoped_ptr<AudioBus> audio_bus_;
- int fill_value_;
- int last_frame_delay_;
-
- DISALLOW_COPY_AND_ASSIGN(AudioPullFifoTest);
-};
-
-TEST_P(AudioPullFifoTest, Consume) {
- ConsumeTest(GetParam());
-}
-
-// Test common |frames_to_consume| values which will be used as the input
-// parameter to AudioPullFifo::Consume() when the consumer asks for data.
-INSTANTIATE_TEST_CASE_P(
- AudioPullFifoTest, AudioPullFifoTest,
- testing::Values(544, 512, 512, 512, 512, 2048, 544, 441, 440, 433, 500));
-
-} // namespace media
diff --git a/src/media/base/audio_renderer.cc b/src/media/base/audio_renderer.cc
deleted file mode 100644
index e7b737e..0000000
--- a/src/media/base/audio_renderer.cc
+++ /dev/null
@@ -1,12 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "media/base/audio_renderer.h"
-
-namespace media {
-
-AudioRenderer::AudioRenderer() {}
-AudioRenderer::~AudioRenderer() {}
-
-} // namespace media
diff --git a/src/media/base/audio_renderer.h b/src/media/base/audio_renderer.h
deleted file mode 100644
index 19459ac..0000000
--- a/src/media/base/audio_renderer.h
+++ /dev/null
@@ -1,105 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef MEDIA_BASE_AUDIO_RENDERER_H_
-#define MEDIA_BASE_AUDIO_RENDERER_H_
-
-#include <list>
-
-#include "base/callback.h"
-#include "base/memory/ref_counted.h"
-#include "base/time.h"
-#include "media/base/media_export.h"
-#include "media/base/pipeline_status.h"
-
-namespace media {
-
-class AudioDecoder;
-class DemuxerStream;
-
-class MEDIA_EXPORT AudioRenderer
- : public base::RefCountedThreadSafe<AudioRenderer> {
- public:
- typedef std::list<scoped_refptr<AudioDecoder> > AudioDecoderList;
-
- // First parameter is the current time that has been rendered.
- // Second parameter is the maximum time value that the clock may not exceed.
- typedef base::Callback<void(base::TimeDelta, base::TimeDelta)> TimeCB;
-
- // Initialize an AudioRenderer with the given AudioDecoders, executing the
- // |init_cb| upon completion.
- //
- // |statistics_cb| is executed periodically with audio rendering stats.
- //
- // |underflow_cb| is executed when the renderer runs out of data to pass to
- // the audio card during playback. ResumeAfterUnderflow() must be called
- // to resume playback. Pause(), Preroll(), or Stop() cancels the underflow
- // condition.
- //
- // |time_cb| is executed whenever time has advanced by way of audio rendering.
- //
- // |ended_cb| is executed when audio rendering has reached the end of stream.
- //
- // |disabled_cb| is executed when audio rendering has been disabled due to
- // external factors (e.g., the device was removed). |time_cb| will no longer
- // be executed.
- //
- // |error_cb| is executed if an error was encountered.
- virtual void Initialize(const scoped_refptr<DemuxerStream>& stream,
- const AudioDecoderList& decoders,
- const PipelineStatusCB& init_cb,
- const StatisticsCB& statistics_cb,
- const base::Closure& underflow_cb,
- const TimeCB& time_cb,
- const base::Closure& ended_cb,
- const base::Closure& disabled_cb,
- const PipelineStatusCB& error_cb) = 0;
-
- // Start audio decoding and rendering at the current playback rate, executing
- // |callback| when playback is underway.
- virtual void Play(const base::Closure& callback) = 0;
-
- // Temporarily suspend decoding and rendering audio, executing |callback| when
- // playback has been suspended.
- virtual void Pause(const base::Closure& callback) = 0;
-
- // Discard any audio data, executing |callback| when completed.
- virtual void Flush(const base::Closure& callback) = 0;
-
- // Start prerolling audio data for samples starting at |time|, executing
- // |callback| when completed.
- //
- // Only valid to call after a successful Initialize() or Flush().
- virtual void Preroll(base::TimeDelta time,
- const PipelineStatusCB& callback) = 0;
-
- // Stop all operations in preparation for being deleted, executing |callback|
- // when complete.
- virtual void Stop(const base::Closure& callback) = 0;
-
- // Updates the current playback rate.
- virtual void SetPlaybackRate(float playback_rate) = 0;
-
- // Sets the output volume.
- virtual void SetVolume(float volume) = 0;
-
- // Resumes playback after underflow occurs.
- //
- // |buffer_more_audio| is set to true if you want to increase the size of the
- // decoded audio buffer.
- virtual void ResumeAfterUnderflow(bool buffer_more_audio) = 0;
-
- protected:
- friend class base::RefCountedThreadSafe<AudioRenderer>;
-
- AudioRenderer();
- virtual ~AudioRenderer();
-
- private:
- DISALLOW_COPY_AND_ASSIGN(AudioRenderer);
-};
-
-} // namespace media
-
-#endif // MEDIA_BASE_AUDIO_RENDERER_H_
diff --git a/src/media/base/audio_renderer_mixer_input.cc b/src/media/base/audio_renderer_mixer_input.cc
deleted file mode 100644
index 569b126..0000000
--- a/src/media/base/audio_renderer_mixer_input.cc
+++ /dev/null
@@ -1,99 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "media/base/audio_renderer_mixer_input.h"
-
-#include "base/logging.h"
-#include "media/base/audio_renderer_mixer.h"
-
-namespace media {
-
-AudioRendererMixerInput::AudioRendererMixerInput(
- const GetMixerCB& get_mixer_cb, const RemoveMixerCB& remove_mixer_cb)
- : playing_(false),
- initialized_(false),
- volume_(1.0f),
- get_mixer_cb_(get_mixer_cb),
- remove_mixer_cb_(remove_mixer_cb),
- mixer_(NULL),
- callback_(NULL),
- current_audio_delay_milliseconds_(0) {
-}
-
-AudioRendererMixerInput::~AudioRendererMixerInput() {
- // Mixer is no longer safe to use after |remove_mixer_cb_| has been called.
- if (initialized_)
- remove_mixer_cb_.Run(params_);
-}
-
-void AudioRendererMixerInput::Initialize(
- const AudioParameters& params,
- AudioRendererSink::RenderCallback* callback) {
- DCHECK(!initialized_);
- params_ = params;
- mixer_ = get_mixer_cb_.Run(params_);
- callback_ = callback;
- initialized_ = true;
-}
-
-void AudioRendererMixerInput::Start() {
- DCHECK(initialized_);
- DCHECK(!playing_);
-}
-
-void AudioRendererMixerInput::Stop() {
- // Stop() may be called at any time; if Pause() hasn't been called, we need
- // to remove our mixer input before shutdown.
- if (!playing_)
- return;
-
- mixer_->RemoveMixerInput(this);
- playing_ = false;
-}
-
-void AudioRendererMixerInput::Play() {
- DCHECK(initialized_);
-
- if (playing_)
- return;
-
- mixer_->AddMixerInput(this);
- playing_ = true;
-}
-
-void AudioRendererMixerInput::Pause(bool /* flush */) {
- DCHECK(initialized_);
-
- if (!playing_)
- return;
-
- mixer_->RemoveMixerInput(this);
- playing_ = false;
-}
-
-bool AudioRendererMixerInput::SetVolume(double volume) {
- volume_ = volume;
- return true;
-}
-
-double AudioRendererMixerInput::ProvideInput(AudioBus* audio_bus,
- base::TimeDelta buffer_delay) {
- int frames_filled = callback_->Render(
- audio_bus,
- current_audio_delay_milliseconds_ + buffer_delay.InMilliseconds());
-
- // AudioConverter expects unfilled frames to be zeroed.
- if (frames_filled < audio_bus->frames()) {
- audio_bus->ZeroFramesPartial(
- frames_filled, audio_bus->frames() - frames_filled);
- }
-
- return frames_filled > 0 ? volume_ : 0;
-}
-
-void AudioRendererMixerInput::OnRenderError() {
- callback_->OnRenderError();
-}
-
-} // namespace media
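ProvideInput() above encodes a small but easy-to-miss contract: any frames the render callback did not fill must be zeroed before the converter sees them, and the reported volume collapses to 0.0 when nothing was rendered. A standalone restatement of just that logic (hypothetical ProvideInputSketch, plain std::vector instead of AudioBus):

#include <algorithm>
#include <vector>

// Returns the effective volume for this conversion pass, zero-padding the
// tail of |audio_bus| exactly as the deleted code does.
double ProvideInputSketch(std::vector<float>* audio_bus, int frames_filled,
                          double volume) {
  const int total_frames = static_cast<int>(audio_bus->size());
  frames_filled = std::min(frames_filled, total_frames);
  // The converter expects unfilled frames to be zeroed.
  std::fill(audio_bus->begin() + frames_filled, audio_bus->end(), 0.0f);
  return frames_filled > 0 ? volume : 0.0;
}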
diff --git a/src/media/base/audio_renderer_mixer_input.h b/src/media/base/audio_renderer_mixer_input.h
deleted file mode 100644
index a08b108..0000000
--- a/src/media/base/audio_renderer_mixer_input.h
+++ /dev/null
@@ -1,83 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef MEDIA_BASE_AUDIO_RENDERER_MIXER_INPUT_H_
-#define MEDIA_BASE_AUDIO_RENDERER_MIXER_INPUT_H_
-
-#include <vector>
-
-#include "base/callback.h"
-#include "media/base/audio_converter.h"
-#include "media/base/audio_renderer_sink.h"
-
-namespace media {
-
-class AudioRendererMixer;
-
-class MEDIA_EXPORT AudioRendererMixerInput
- : NON_EXPORTED_BASE(public AudioRendererSink),
- public AudioConverter::InputCallback {
- public:
- typedef base::Callback<AudioRendererMixer*(
- const AudioParameters& params)> GetMixerCB;
- typedef base::Callback<void(const AudioParameters& params)> RemoveMixerCB;
-
- AudioRendererMixerInput(
- const GetMixerCB& get_mixer_cb, const RemoveMixerCB& remove_mixer_cb);
-
- // AudioRendererSink implementation.
- virtual void Start() OVERRIDE;
- virtual void Stop() OVERRIDE;
- virtual void Play() OVERRIDE;
- virtual void Pause(bool flush) OVERRIDE;
- virtual bool SetVolume(double volume) OVERRIDE;
- virtual void Initialize(const AudioParameters& params,
- AudioRendererSink::RenderCallback* renderer) OVERRIDE;
-
- // Called by AudioRendererMixer when new delay information is available.
- void set_audio_delay_milliseconds(int audio_delay_milliseconds) {
- current_audio_delay_milliseconds_ = audio_delay_milliseconds;
- }
-
- // Called by AudioRendererMixer when an error occurs.
- void OnRenderError();
-
- protected:
- virtual ~AudioRendererMixerInput();
-
- private:
- friend class AudioRendererMixerInputTest;
-
- bool playing_;
- bool initialized_;
- double volume_;
-
- // AudioConverter::InputCallback implementation.
- virtual double ProvideInput(AudioBus* audio_bus,
- base::TimeDelta buffer_delay) OVERRIDE;
-
- // Callbacks provided during construction which allow AudioRendererMixerInput
- // to retrieve a mixer during Initialize() and notify when it's done with it.
- GetMixerCB get_mixer_cb_;
- RemoveMixerCB remove_mixer_cb_;
-
- // AudioParameters received during Initialize().
- AudioParameters params_;
-
- // AudioRendererMixer provided through |get_mixer_cb_| during Initialize(),
- // guaranteed to live (at least) until |remove_mixer_cb_| is called.
- AudioRendererMixer* mixer_;
-
- // Source of audio data which is provided to the mixer.
- AudioRendererSink::RenderCallback* callback_;
-
- // The current audio delay as last provided by AudioRendererMixer.
- int current_audio_delay_milliseconds_;
-
- DISALLOW_COPY_AND_ASSIGN(AudioRendererMixerInput);
-};
-
-} // namespace media
-
-#endif // MEDIA_BASE_AUDIO_RENDERER_MIXER_INPUT_H_
diff --git a/src/media/base/audio_renderer_mixer_input_unittest.cc b/src/media/base/audio_renderer_mixer_input_unittest.cc
deleted file mode 100644
index ad13db1..0000000
--- a/src/media/base/audio_renderer_mixer_input_unittest.cc
+++ /dev/null
@@ -1,116 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/bind.h"
-#include "base/bind_helpers.h"
-#include "media/base/audio_renderer_mixer.h"
-#include "media/base/audio_renderer_mixer_input.h"
-#include "media/base/fake_audio_render_callback.h"
-#include "media/base/mock_audio_renderer_sink.h"
-#include "testing/gmock/include/gmock/gmock.h"
-#include "testing/gtest/include/gtest/gtest.h"
-
-namespace media {
-
-static const int kBitsPerChannel = 16;
-static const int kSampleRate = 48000;
-static const int kBufferSize = 8192;
-static const ChannelLayout kChannelLayout = CHANNEL_LAYOUT_STEREO;
-
-class AudioRendererMixerInputTest : public testing::Test {
- public:
- AudioRendererMixerInputTest() {
- audio_parameters_ = AudioParameters(
- AudioParameters::AUDIO_PCM_LINEAR, kChannelLayout, kSampleRate,
- kBitsPerChannel, kBufferSize);
-
- CreateMixerInput();
- fake_callback_.reset(new FakeAudioRenderCallback(0));
- mixer_input_->Initialize(audio_parameters_, fake_callback_.get());
- EXPECT_CALL(*this, RemoveMixer(testing::_));
- audio_bus_ = AudioBus::Create(audio_parameters_);
- }
-
- void CreateMixerInput() {
- mixer_input_ = new AudioRendererMixerInput(
- base::Bind(
- &AudioRendererMixerInputTest::GetMixer, base::Unretained(this)),
- base::Bind(
- &AudioRendererMixerInputTest::RemoveMixer, base::Unretained(this)));
- }
-
- AudioRendererMixer* GetMixer(const AudioParameters& params) {
- if (!mixer_.get()) {
- scoped_refptr<MockAudioRendererSink> sink = new MockAudioRendererSink();
- EXPECT_CALL(*sink, Start());
- EXPECT_CALL(*sink, Stop());
-
- mixer_.reset(new AudioRendererMixer(
- audio_parameters_, audio_parameters_, sink));
- }
- return mixer_.get();
- }
-
- double ProvideInput() {
- return mixer_input_->ProvideInput(audio_bus_.get(), base::TimeDelta());
- }
-
- int GetAudioDelayMilliseconds() {
- return mixer_input_->current_audio_delay_milliseconds_;
- }
-
- MOCK_METHOD1(RemoveMixer, void(const AudioParameters&));
-
- protected:
- virtual ~AudioRendererMixerInputTest() {}
-
- AudioParameters audio_parameters_;
- scoped_ptr<AudioRendererMixer> mixer_;
- scoped_refptr<AudioRendererMixerInput> mixer_input_;
- scoped_ptr<FakeAudioRenderCallback> fake_callback_;
- scoped_ptr<AudioBus> audio_bus_;
-
- DISALLOW_COPY_AND_ASSIGN(AudioRendererMixerInputTest);
-};
-
-// Test that getting and setting the volume work as expected. The volume is
-// returned from ProvideInput() only when playing.
-TEST_F(AudioRendererMixerInputTest, GetSetVolume) {
- mixer_input_->Start();
- mixer_input_->Play();
-
- // Starting volume should be 1.0.
- EXPECT_DOUBLE_EQ(ProvideInput(), 1);
-
- const double kVolume = 0.5;
- EXPECT_TRUE(mixer_input_->SetVolume(kVolume));
- EXPECT_DOUBLE_EQ(ProvideInput(), kVolume);
-
- mixer_input_->Stop();
-}
-
-// Test Start()/Play()/Pause()/Stop()/playing() all work as expected. Also
-// implicitly tests that AddMixerInput() and RemoveMixerInput() work without
-// crashing; functional tests for these methods are in AudioRendererMixerTest.
-TEST_F(AudioRendererMixerInputTest, StartPlayPauseStopPlaying) {
- mixer_input_->Start();
- mixer_input_->Play();
- EXPECT_DOUBLE_EQ(ProvideInput(), 1);
- mixer_input_->Pause(false);
- mixer_input_->Play();
- EXPECT_DOUBLE_EQ(ProvideInput(), 1);
- mixer_input_->Stop();
-}
-
-// Test that Stop() can be called before Initialize() and Start().
-TEST_F(AudioRendererMixerInputTest, StopBeforeInitializeOrStart) {
- // |mixer_input_| was initialized during construction.
- mixer_input_->Stop();
-
- // Verify Stop() works without Initialize() or Start().
- CreateMixerInput();
- mixer_input_->Stop();
-}
-
-} // namespace media
diff --git a/src/media/base/audio_renderer_sink.h b/src/media/base/audio_renderer_sink.h
deleted file mode 100644
index 5043b85..0000000
--- a/src/media/base/audio_renderer_sink.h
+++ /dev/null
@@ -1,108 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef MEDIA_BASE_AUDIO_RENDERER_SINK_H_
-#define MEDIA_BASE_AUDIO_RENDERER_SINK_H_
-
-#include <vector>
-#include "base/basictypes.h"
-#include "base/memory/ref_counted.h"
-#include "media/audio/audio_parameters.h"
-#include "media/base/audio_bus.h"
-#include "media/base/media_export.h"
-
-#if defined(OS_STARBOARD)
-#include "starboard/configuration.h"
-#endif // defined(OS_STARBOARD)
-
-namespace media {
-
-// AudioRendererSink is an interface representing the end-point for
-// rendered audio. An implementation is expected to
-// periodically call Render() on a callback object.
-
-class MEDIA_EXPORT AudioRendererSink
- : public base::RefCountedThreadSafe<media::AudioRendererSink> {
- public:
- class MEDIA_EXPORT RenderCallback {
- public:
- // Attempts to completely fill all channels of |dest|; returns the actual
- // number of frames filled.
- virtual int Render(AudioBus* dest, int audio_delay_milliseconds) = 0;
-
- // Synchronized audio I/O - see InitializeIO() below.
- virtual void RenderIO(AudioBus* /* source */,
- AudioBus* /* dest */,
- int /* audio_delay_milliseconds */) {}
-
- // Signals an error has occurred.
- virtual void OnRenderError() = 0;
-
-#if defined(__LB_SHELL__) || defined(COBALT)
- // Callback from the sink to the renderer to indicate that it is currently
- // full and will not be requesting additional data until some is consumed.
- virtual void SinkFull() = 0;
-
-#if defined(OS_STARBOARD)
-#if SB_IS(MEDIA_UNDERFLOW_DETECTED_BY_AUDIO_SINK)
- // Callback from the sink to the renderer to indicate that it does not
- // have enough data to continue playback without playing past the end of
- // buffered data.
- virtual void SinkUnderflow() = 0;
-#endif // SB_IS(MEDIA_UNDERFLOW_DETECTED_BY_AUDIO_SINK)
-#endif // defined(OS_STARBOARD)
-#endif // defined(__LB_SHELL__) || defined(COBALT)
-
- protected:
- virtual ~RenderCallback() {}
- };
-
- // Sets important information about the audio stream format.
- // It must be called before any of the other methods.
- virtual void Initialize(const AudioParameters& params,
- RenderCallback* callback) = 0;
-
- // InitializeIO() may be called instead of Initialize() for clients who wish
- // to have synchronized input and output. |input_channels| specifies the
- // number of input channels which will be at the same sample-rate
- // and buffer-size as the output as specified in |params|.
- // The callback's RenderIO() method will be called instead of Render(),
- // providing the synchronized input data at the same time as when new
- // output data is to be rendered.
- virtual void InitializeIO(const AudioParameters& /* params */,
- int /* input_channels */,
- RenderCallback* /* callback */) {}
-
- // Starts audio playback.
- virtual void Start() = 0;
-
- // Stops audio playback.
- virtual void Stop() = 0;
-
- // Pauses playback.
- virtual void Pause(bool flush) = 0;
-
- // Resumes playback after calling Pause().
- virtual void Play() = 0;
-
- // Sets the playback volume, with range [0.0, 1.0] inclusive.
- // Returns |true| on success.
- virtual bool SetVolume(double volume) = 0;
-
-#if defined(__LB_SHELL__) || defined(COBALT)
- // To avoid duplicating audio data and making additional copies, our Sink
- // implementation is responsible for buffering rendered audio. As a
- // result, the renderer relays the request to buffer more audio back
- // to the Sink.
- virtual void ResumeAfterUnderflow(bool buffer_more_audio) = 0;
-#endif
-
- protected:
- friend class base::RefCountedThreadSafe<AudioRendererSink>;
- virtual ~AudioRendererSink() {}
-};
-
-} // namespace media
-
-#endif // MEDIA_BASE_AUDIO_RENDERER_SINK_H_
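For reviewers tracing where this interface was used, the sink/callback split boils down to two methods: Render() fills as much of the destination as it can and returns the frame count, and OnRenderError() lets the sink report failures. A minimal standalone analogue (hypothetical RenderCallbackSketch and SilenceSource types, not the real media::AudioBus API):

#include <algorithm>
#include <vector>

struct RenderCallbackSketch {
  virtual ~RenderCallbackSketch() {}
  // Fill as much of |dest| as possible; return the number of frames written.
  virtual int Render(std::vector<float>* dest, int audio_delay_ms) = 0;
  // Called by the sink when playback hits an unrecoverable error.
  virtual void OnRenderError() = 0;
};

// A trivial source that always renders a full request of silence.
struct SilenceSource : RenderCallbackSketch {
  int Render(std::vector<float>* dest, int /* audio_delay_ms */) override {
    std::fill(dest->begin(), dest->end(), 0.0f);
    return static_cast<int>(dest->size());
  }
  void OnRenderError() override {}
};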
diff --git a/src/media/base/audio_splicer.cc b/src/media/base/audio_splicer.cc
deleted file mode 100644
index 2efbba9..0000000
--- a/src/media/base/audio_splicer.cc
+++ /dev/null
@@ -1,139 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "media/base/audio_splicer.h"
-
-#include <cstdlib>
-
-#include "base/logging.h"
-#include "media/base/audio_decoder_config.h"
-#include "media/base/audio_timestamp_helper.h"
-#include "media/base/buffers.h"
-#include "media/base/data_buffer.h"
-
-namespace media {
-
-// Largest gap or overlap allowed by this class. Anything
-// larger than this will trigger an error.
-// This is an arbitrary value, but the initial selection of 50ms
-// roughly represents the duration of 2 compressed AAC or MP3 frames.
-static const int kMaxTimeDeltaInMilliseconds = 50;
-
-AudioSplicer::AudioSplicer(int bytes_per_frame, int samples_per_second)
- : output_timestamp_helper_(bytes_per_frame, samples_per_second),
- min_gap_size_(2 * bytes_per_frame),
- received_end_of_stream_(false) {
-}
-
-AudioSplicer::~AudioSplicer() {
-}
-
-void AudioSplicer::Reset() {
- output_timestamp_helper_.SetBaseTimestamp(kNoTimestamp());
- output_buffers_.clear();
- received_end_of_stream_ = false;
-}
-
-bool AudioSplicer::AddInput(const scoped_refptr<Buffer>& input) {
- DCHECK(!received_end_of_stream_ || input->IsEndOfStream());
-
- if (input->IsEndOfStream()) {
- output_buffers_.push_back(input);
- received_end_of_stream_ = true;
- return true;
- }
-
- DCHECK(input->GetTimestamp() != kNoTimestamp());
- DCHECK(input->GetDuration() > base::TimeDelta());
- DCHECK_GT(input->GetDataSize(), 0);
-
- if (output_timestamp_helper_.base_timestamp() == kNoTimestamp())
- output_timestamp_helper_.SetBaseTimestamp(input->GetTimestamp());
-
- if (output_timestamp_helper_.base_timestamp() > input->GetTimestamp()) {
- DVLOG(1) << "Input timestamp is before the base timestamp.";
- return false;
- }
-
- base::TimeDelta timestamp = input->GetTimestamp();
- base::TimeDelta expected_timestamp = output_timestamp_helper_.GetTimestamp();
- base::TimeDelta delta = timestamp - expected_timestamp;
-
- if (std::abs(delta.InMilliseconds()) > kMaxTimeDeltaInMilliseconds) {
- DVLOG(1) << "Timestamp delta too large: " << delta.InMicroseconds() << "us";
- return false;
- }
-
- int bytes_to_fill = 0;
- if (delta != base::TimeDelta())
- bytes_to_fill = output_timestamp_helper_.GetBytesToTarget(timestamp);
-
- if (bytes_to_fill == 0 || std::abs(bytes_to_fill) < min_gap_size_) {
- AddOutputBuffer(input);
- return true;
- }
-
- if (bytes_to_fill > 0) {
- DVLOG(1) << "Gap detected @ " << expected_timestamp.InMicroseconds()
- << " us: " << delta.InMicroseconds() << " us";
-
- // Create a buffer with enough silence samples to fill the gap and
- // add it to the output buffer.
- scoped_refptr<DataBuffer> gap = new DataBuffer(bytes_to_fill);
- gap->SetDataSize(bytes_to_fill);
- memset(gap->GetWritableData(), 0, bytes_to_fill);
- gap->SetTimestamp(expected_timestamp);
- gap->SetDuration(output_timestamp_helper_.GetDuration(bytes_to_fill));
- AddOutputBuffer(gap);
-
- // Add the input buffer now that the gap has been filled.
- AddOutputBuffer(input);
- return true;
- }
-
- int bytes_to_skip = -bytes_to_fill;
-
- DVLOG(1) << "Overlap detected @ " << expected_timestamp.InMicroseconds()
- << " us: " << -delta.InMicroseconds() << " us";
-
- if (input->GetDataSize() <= bytes_to_skip) {
- DVLOG(1) << "Dropping whole buffer";
- return true;
- }
-
- // Copy the trailing samples that do not overlap samples already output
- // into a new buffer. Add this new buffer to the output queue.
- //
- // TODO(acolwell): Implement a cross-fade here so the transition is less
- // jarring.
- int new_buffer_size = input->GetDataSize() - bytes_to_skip;
-
- scoped_refptr<DataBuffer> new_buffer = new DataBuffer(new_buffer_size);
- new_buffer->SetDataSize(new_buffer_size);
- memcpy(new_buffer->GetWritableData(),
- input->GetData() + bytes_to_skip,
- new_buffer_size);
- new_buffer->SetTimestamp(expected_timestamp);
- new_buffer->SetDuration(
- output_timestamp_helper_.GetDuration(new_buffer_size));
- AddOutputBuffer(new_buffer);
- return true;
-}
-
-bool AudioSplicer::HasNextBuffer() const {
- return !output_buffers_.empty();
-}
-
-scoped_refptr<Buffer> AudioSplicer::GetNextBuffer() {
- scoped_refptr<Buffer> ret = output_buffers_.front();
- output_buffers_.pop_front();
- return ret;
-}
-
-void AudioSplicer::AddOutputBuffer(const scoped_refptr<Buffer>& buffer) {
- output_timestamp_helper_.AddBytes(buffer->GetDataSize());
- output_buffers_.push_back(buffer);
-}
-
-} // namespace media
diff --git a/src/media/base/audio_splicer.h b/src/media/base/audio_splicer.h
deleted file mode 100644
index aa97fae..0000000
--- a/src/media/base/audio_splicer.h
+++ /dev/null
@@ -1,60 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef MEDIA_BASE_AUDIO_SPLICER_H_
-#define MEDIA_BASE_AUDIO_SPLICER_H_
-
-#include <deque>
-
-#include "base/memory/ref_counted.h"
-#include "media/base/audio_timestamp_helper.h"
-#include "media/base/media_export.h"
-
-namespace media {
-
-class AudioDecoderConfig;
-class Buffer;
-
-// Helper class that handles filling gaps and resolving overlaps.
-class MEDIA_EXPORT AudioSplicer {
- public:
- AudioSplicer(int bytes_per_frame, int samples_per_second);
- ~AudioSplicer();
-
- // Resets the splicer state by clearing the output buffers queue,
- // and resetting the timestamp helper.
- void Reset();
-
- // Adds a new buffer full of samples or end of stream buffer to the splicer.
- // Returns true if the buffer was accepted. False is returned if an error
- // occurred.
- bool AddInput(const scoped_refptr<Buffer>& input);
-
- // Returns true if the splicer has a buffer to return.
- bool HasNextBuffer() const;
-
- // Removes the next buffer from the output buffer queue and returns it.
- // This should only be called if HasNextBuffer() returns true.
- scoped_refptr<Buffer> GetNextBuffer();
-
- private:
- void AddOutputBuffer(const scoped_refptr<Buffer>& buffer);
-
- AudioTimestampHelper output_timestamp_helper_;
-
- // Minimum gap size needed before the splicer will take action to
- // fill a gap. This avoids periodically inserting and then dropping samples
- // when the buffer timestamps are slightly off because of timestamp rounding
- // in the source content.
- int min_gap_size_;
-
- std::deque<scoped_refptr<Buffer> > output_buffers_;
- bool received_end_of_stream_;
-
- DISALLOW_IMPLICIT_CONSTRUCTORS(AudioSplicer);
-};
-
-} // namespace media
-
-#endif // MEDIA_BASE_AUDIO_SPLICER_H_
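The splicer's gap and overlap handling reduces to converting a timestamp delta into a whole number of frames, and therefore bytes, rounding to the nearest frame. A standalone sketch of that rounding with a worked example (hypothetical BytesToTargetSketch, a simplified stand-in for AudioTimestampHelper::GetBytesToTarget()):

#include <cmath>
#include <cstdint>
#include <cstdio>

// Positive result: bytes of silence to insert; negative: bytes to drop.
int64_t BytesToTargetSketch(int64_t delta_us, int bytes_per_frame,
                            int samples_per_second) {
  const double us_per_frame = 1000000.0 / samples_per_second;
  const int64_t frames = std::llround(delta_us / us_per_frame);
  return frames * bytes_per_frame;
}

int main() {
  // A 159 us gap at 44100 Hz with 4-byte frames rounds to 7 frames, i.e.
  // 28 bytes of silence, the same sizes used by the GapInsertion test below.
  std::printf("%lld\n", static_cast<long long>(
      BytesToTargetSketch(159, 4, 44100)));  // Prints 28.
}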
diff --git a/src/media/base/audio_splicer_unittest.cc b/src/media/base/audio_splicer_unittest.cc
deleted file mode 100644
index 2096789..0000000
--- a/src/media/base/audio_splicer_unittest.cc
+++ /dev/null
@@ -1,366 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/memory/scoped_ptr.h"
-#include "media/base/audio_splicer.h"
-#include "media/base/audio_timestamp_helper.h"
-#include "media/base/buffers.h"
-#include "media/base/data_buffer.h"
-#include "testing/gtest/include/gtest/gtest.h"
-
-namespace media {
-
-static const int kBytesPerFrame = 4;
-static const int kDefaultSampleRate = 44100;
-static const int kDefaultBufferSize = 100 * kBytesPerFrame;
-
-class AudioSplicerTest : public ::testing::Test {
- public:
- AudioSplicerTest()
- : splicer_(kBytesPerFrame, kDefaultSampleRate),
- input_timestamp_helper_(kBytesPerFrame, kDefaultSampleRate) {
- input_timestamp_helper_.SetBaseTimestamp(base::TimeDelta());
- }
-
- scoped_refptr<Buffer> GetNextInputBuffer(uint8 value) {
- return GetNextInputBuffer(value, kDefaultBufferSize);
- }
-
- scoped_refptr<Buffer> GetNextInputBuffer(uint8 value, int size) {
- scoped_refptr<DataBuffer> buffer = new DataBuffer(size);
- buffer->SetDataSize(size);
- memset(buffer->GetWritableData(), value, buffer->GetDataSize());
- buffer->SetTimestamp(input_timestamp_helper_.GetTimestamp());
- buffer->SetDuration(
- input_timestamp_helper_.GetDuration(buffer->GetDataSize()));
- input_timestamp_helper_.AddBytes(buffer->GetDataSize());
- return buffer;
- }
-
- bool VerifyData(const uint8* data, int size, int value) {
- for (int i = 0; i < size; ++i) {
- if (data[i] != value)
- return false;
- }
- return true;
- }
-
- protected:
- AudioSplicer splicer_;
- AudioTimestampHelper input_timestamp_helper_;
-
- DISALLOW_COPY_AND_ASSIGN(AudioSplicerTest);
-};
-
-TEST_F(AudioSplicerTest, PassThru) {
- EXPECT_FALSE(splicer_.HasNextBuffer());
-
- // Test single buffer pass-thru behavior.
- scoped_refptr<Buffer> input_1 = GetNextInputBuffer(1);
- EXPECT_TRUE(splicer_.AddInput(input_1));
- EXPECT_TRUE(splicer_.HasNextBuffer());
-
- scoped_refptr<Buffer> output_1 = splicer_.GetNextBuffer();
- EXPECT_FALSE(splicer_.HasNextBuffer());
- EXPECT_EQ(input_1->GetTimestamp(), output_1->GetTimestamp());
- EXPECT_EQ(input_1->GetDuration(), output_1->GetDuration());
- EXPECT_EQ(input_1->GetDataSize(), output_1->GetDataSize());
-
- // Test that multiple buffers can be queued in the splicer.
- scoped_refptr<Buffer> input_2 = GetNextInputBuffer(2);
- scoped_refptr<Buffer> input_3 = GetNextInputBuffer(3);
- EXPECT_TRUE(splicer_.AddInput(input_2));
- EXPECT_TRUE(splicer_.AddInput(input_3));
- EXPECT_TRUE(splicer_.HasNextBuffer());
-
- scoped_refptr<Buffer> output_2 = splicer_.GetNextBuffer();
- EXPECT_TRUE(splicer_.HasNextBuffer());
- EXPECT_EQ(input_2->GetTimestamp(), output_2->GetTimestamp());
- EXPECT_EQ(input_2->GetDuration(), output_2->GetDuration());
- EXPECT_EQ(input_2->GetDataSize(), output_2->GetDataSize());
-
- scoped_refptr<Buffer> output_3 = splicer_.GetNextBuffer();
- EXPECT_FALSE(splicer_.HasNextBuffer());
- EXPECT_EQ(input_3->GetTimestamp(), output_3->GetTimestamp());
- EXPECT_EQ(input_3->GetDuration(), output_3->GetDuration());
- EXPECT_EQ(input_3->GetDataSize(), output_3->GetDataSize());
-}
-
-TEST_F(AudioSplicerTest, Reset) {
- scoped_refptr<Buffer> input_1 = GetNextInputBuffer(1);
- EXPECT_TRUE(splicer_.AddInput(input_1));
- EXPECT_TRUE(splicer_.HasNextBuffer());
-
- splicer_.Reset();
- EXPECT_FALSE(splicer_.HasNextBuffer());
-
- // Add some bytes to the timestamp helper so that the
- // next buffer starts many frames beyond the end of
- // |input_1|. This is to make sure that Reset() actually
- // clears its state and doesn't try to insert a gap.
- input_timestamp_helper_.AddBytes(100 * kBytesPerFrame);
-
- // Verify that a new input buffer passes through as expected.
- scoped_refptr<Buffer> input_2 = GetNextInputBuffer(2);
- EXPECT_TRUE(splicer_.AddInput(input_2));
- EXPECT_TRUE(splicer_.HasNextBuffer());
-
- scoped_refptr<Buffer> output_2 = splicer_.GetNextBuffer();
- EXPECT_FALSE(splicer_.HasNextBuffer());
- EXPECT_EQ(input_2->GetTimestamp(), output_2->GetTimestamp());
- EXPECT_EQ(input_2->GetDuration(), output_2->GetDuration());
- EXPECT_EQ(input_2->GetDataSize(), output_2->GetDataSize());
-}
-
-TEST_F(AudioSplicerTest, EndOfStream) {
- scoped_refptr<Buffer> input_1 = GetNextInputBuffer(1);
- scoped_refptr<Buffer> input_2 = new DataBuffer(0); // End of stream.
- scoped_refptr<Buffer> input_3 = GetNextInputBuffer(2);
- EXPECT_TRUE(input_2->IsEndOfStream());
-
- EXPECT_TRUE(splicer_.AddInput(input_1));
- EXPECT_TRUE(splicer_.AddInput(input_2));
- EXPECT_TRUE(splicer_.HasNextBuffer());
-
- scoped_refptr<Buffer> output_1 = splicer_.GetNextBuffer();
- scoped_refptr<Buffer> output_2 = splicer_.GetNextBuffer();
- EXPECT_FALSE(splicer_.HasNextBuffer());
- EXPECT_EQ(input_1->GetTimestamp(), output_1->GetTimestamp());
- EXPECT_EQ(input_1->GetDuration(), output_1->GetDuration());
- EXPECT_EQ(input_1->GetDataSize(), output_1->GetDataSize());
-
- EXPECT_TRUE(output_2->IsEndOfStream());
-
- // Verify that buffers can be added again after Reset().
- splicer_.Reset();
- EXPECT_TRUE(splicer_.AddInput(input_3));
- scoped_refptr<Buffer> output_3 = splicer_.GetNextBuffer();
- EXPECT_FALSE(splicer_.HasNextBuffer());
- EXPECT_EQ(input_3->GetTimestamp(), output_3->GetTimestamp());
- EXPECT_EQ(input_3->GetDuration(), output_3->GetDuration());
- EXPECT_EQ(input_3->GetDataSize(), output_3->GetDataSize());
-}
-
-
-// Test the gap insertion code.
-// +--------------+ +--------------+
-// |11111111111111| |22222222222222|
-// +--------------+ +--------------+
-// Results in:
-// +--------------+----+--------------+
-// |11111111111111|0000|22222222222222|
-// +--------------+----+--------------+
-TEST_F(AudioSplicerTest, GapInsertion) {
- scoped_refptr<Buffer> input_1 = GetNextInputBuffer(1);
-
- // Add bytes to the timestamp helper so that the next buffer
- // will have a starting timestamp that indicates a gap is
- // present.
- const int kGapSize = 7 * kBytesPerFrame;
- input_timestamp_helper_.AddBytes(kGapSize);
- scoped_refptr<Buffer> input_2 = GetNextInputBuffer(2);
-
- EXPECT_TRUE(splicer_.AddInput(input_1));
- EXPECT_TRUE(splicer_.AddInput(input_2));
-
- // Verify that a gap buffer is generated.
- EXPECT_TRUE(splicer_.HasNextBuffer());
- scoped_refptr<Buffer> output_1 = splicer_.GetNextBuffer();
- scoped_refptr<Buffer> output_2 = splicer_.GetNextBuffer();
- scoped_refptr<Buffer> output_3 = splicer_.GetNextBuffer();
- EXPECT_FALSE(splicer_.HasNextBuffer());
-
- // Verify that the first input buffer passed through unmodified.
- EXPECT_EQ(input_1->GetTimestamp(), output_1->GetTimestamp());
- EXPECT_EQ(input_1->GetDuration(), output_1->GetDuration());
- EXPECT_EQ(input_1->GetDataSize(), output_1->GetDataSize());
- EXPECT_TRUE(VerifyData(output_1->GetData(), output_1->GetDataSize(), 1));
-
- // Verify the contents of the gap buffer.
- base::TimeDelta gap_timestamp =
- input_1->GetTimestamp() + input_1->GetDuration();
- base::TimeDelta gap_duration = input_2->GetTimestamp() - gap_timestamp;
- EXPECT_GT(gap_duration, base::TimeDelta());
- EXPECT_EQ(gap_timestamp, output_2->GetTimestamp());
- EXPECT_EQ(gap_duration, output_2->GetDuration());
- EXPECT_EQ(kGapSize, output_2->GetDataSize());
- EXPECT_TRUE(VerifyData(output_2->GetData(), output_2->GetDataSize(), 0));
-
- // Verify that the second input buffer passed through unmodified.
- EXPECT_EQ(input_2->GetTimestamp(), output_3->GetTimestamp());
- EXPECT_EQ(input_2->GetDuration(), output_3->GetDuration());
- EXPECT_EQ(input_2->GetDataSize(), output_3->GetDataSize());
- EXPECT_TRUE(VerifyData(output_3->GetData(), output_3->GetDataSize(), 2));
-}
-
-
-// Test that an error is signalled when the gap between input buffers is
-// too large.
-TEST_F(AudioSplicerTest, GapTooLarge) {
- scoped_refptr<Buffer> input_1 = GetNextInputBuffer(1);
-
- // Add a second's worth of bytes so that an unacceptably large
- // gap exists between |input_1| and |input_2|.
- const int kGapSize = kDefaultSampleRate * kBytesPerFrame;
- input_timestamp_helper_.AddBytes(kGapSize);
- scoped_refptr<Buffer> input_2 = GetNextInputBuffer(2);
-
- EXPECT_TRUE(splicer_.AddInput(input_1));
- EXPECT_FALSE(splicer_.AddInput(input_2));
-
- EXPECT_TRUE(splicer_.HasNextBuffer());
- scoped_refptr<Buffer> output_1 = splicer_.GetNextBuffer();
-
- // Verify that the first input buffer passed through unmodified.
- EXPECT_EQ(input_1->GetTimestamp(), output_1->GetTimestamp());
- EXPECT_EQ(input_1->GetDuration(), output_1->GetDuration());
- EXPECT_EQ(input_1->GetDataSize(), output_1->GetDataSize());
- EXPECT_TRUE(VerifyData(output_1->GetData(), output_1->GetDataSize(), 1));
-
- // Verify that the second buffer is not available.
- EXPECT_FALSE(splicer_.HasNextBuffer());
-
- // Reset the timestamp helper so it can generate a buffer that is
- // right after |input_1|.
- input_timestamp_helper_.SetBaseTimestamp(
- input_1->GetTimestamp() + input_1->GetDuration());
-
- // Verify that valid buffers are still accepted.
- scoped_refptr<Buffer> input_3 = GetNextInputBuffer(3);
- EXPECT_TRUE(splicer_.AddInput(input_3));
- EXPECT_TRUE(splicer_.HasNextBuffer());
- scoped_refptr<Buffer> output_2 = splicer_.GetNextBuffer();
- EXPECT_FALSE(splicer_.HasNextBuffer());
- EXPECT_EQ(input_3->GetTimestamp(), output_2->GetTimestamp());
- EXPECT_EQ(input_3->GetDuration(), output_2->GetDuration());
- EXPECT_EQ(input_3->GetDataSize(), output_2->GetDataSize());
- EXPECT_TRUE(VerifyData(output_2->GetData(), output_2->GetDataSize(), 3));
-}
-
-
-// Verifies that an error is signalled if AddInput() is called
-// with a timestamp that is earlier than the first buffer added.
-TEST_F(AudioSplicerTest, BufferAddedBeforeBase) {
- input_timestamp_helper_.SetBaseTimestamp(
- base::TimeDelta::FromMicroseconds(10));
- scoped_refptr<Buffer> input_1 = GetNextInputBuffer(1);
-
- // Reset the timestamp helper so the next buffer will have a timestamp earlier
- // than |input_1|.
- input_timestamp_helper_.SetBaseTimestamp(base::TimeDelta::FromSeconds(0));
- scoped_refptr<Buffer> input_2 = GetNextInputBuffer(1);
-
- EXPECT_GT(input_1->GetTimestamp(), input_2->GetTimestamp());
- EXPECT_TRUE(splicer_.AddInput(input_1));
- EXPECT_FALSE(splicer_.AddInput(input_2));
-}
-
-
-// Test when one buffer partially overlaps another.
-// +--------------+
-// |11111111111111|
-// +--------------+
-// +--------------+
-// |22222222222222|
-// +--------------+
-// Results in:
-// +--------------+----------+
-// |11111111111111|2222222222|
-// +--------------+----------+
-TEST_F(AudioSplicerTest, PartialOverlap) {
- scoped_refptr<Buffer> input_1 = GetNextInputBuffer(1);
-
- // Reset timestamp helper so that the next buffer will have a
- // timestamp that starts in the middle of |input_1|.
- const int kOverlapSize = input_1->GetDataSize() / 4;
- input_timestamp_helper_.SetBaseTimestamp(input_1->GetTimestamp());
- input_timestamp_helper_.AddBytes(input_1->GetDataSize() - kOverlapSize);
-
- scoped_refptr<Buffer> input_2 = GetNextInputBuffer(2);
-
- EXPECT_TRUE(splicer_.AddInput(input_1));
- EXPECT_TRUE(splicer_.AddInput(input_2));
-
- EXPECT_TRUE(splicer_.HasNextBuffer());
- scoped_refptr<Buffer> output_1 = splicer_.GetNextBuffer();
- scoped_refptr<Buffer> output_2 = splicer_.GetNextBuffer();
- EXPECT_FALSE(splicer_.HasNextBuffer());
-
- // Verify that the first input buffer passed through unmodified.
- EXPECT_EQ(input_1->GetTimestamp(), output_1->GetTimestamp());
- EXPECT_EQ(input_1->GetDuration(), output_1->GetDuration());
- EXPECT_EQ(input_1->GetDataSize(), output_1->GetDataSize());
- EXPECT_TRUE(VerifyData(output_1->GetData(), output_1->GetDataSize(), 1));
-
-
- // Verify that the second input buffer was truncated to only contain
- // the samples that are after the end of |input_1|.
- base::TimeDelta expected_timestamp =
- input_1->GetTimestamp() + input_1->GetDuration();
- base::TimeDelta expected_duration =
- (input_2->GetTimestamp() + input_2->GetDuration()) - expected_timestamp;
- EXPECT_EQ(expected_timestamp, output_2->GetTimestamp());
- EXPECT_EQ(expected_duration, output_2->GetDuration());
- EXPECT_EQ(input_2->GetDataSize() - kOverlapSize, output_2->GetDataSize());
- EXPECT_TRUE(VerifyData(output_2->GetData(), output_2->GetDataSize(), 2));
-}
-
-
-// Test that an input buffer that is completely overlapped by a buffer
-// that was already added is dropped.
-// +--------------+
-// |11111111111111|
-// +--------------+
-// +-----+
-// |22222|
-// +-----+
-// +-------------+
-// |3333333333333|
-// +-------------+
-// Results in:
-// +--------------+-------------+
-// |11111111111111|3333333333333|
-// +--------------+-------------+
-TEST_F(AudioSplicerTest, DropBuffer) {
- scoped_refptr<Buffer> input_1 = GetNextInputBuffer(1);
-
- // Reset timestamp helper so that the next buffer will have a
- // timestamp that starts in the middle of |input_1|.
- const int kOverlapOffset = input_1->GetDataSize() / 2;
- const int kOverlapSize = input_1->GetDataSize() / 4;
- input_timestamp_helper_.SetBaseTimestamp(input_1->GetTimestamp());
- input_timestamp_helper_.AddBytes(kOverlapOffset);
-
- scoped_refptr<Buffer> input_2 = GetNextInputBuffer(2, kOverlapSize);
-
- // Reset the timestamp helper so the next buffer will be right after
- // |input_1|.
- input_timestamp_helper_.SetBaseTimestamp(input_1->GetTimestamp());
- input_timestamp_helper_.AddBytes(input_1->GetDataSize());
- scoped_refptr<Buffer> input_3 = GetNextInputBuffer(3);
-
- EXPECT_TRUE(splicer_.AddInput(input_1));
- EXPECT_TRUE(splicer_.AddInput(input_2));
- EXPECT_TRUE(splicer_.AddInput(input_3));
-
- EXPECT_TRUE(splicer_.HasNextBuffer());
- scoped_refptr<Buffer> output_1 = splicer_.GetNextBuffer();
- scoped_refptr<Buffer> output_2 = splicer_.GetNextBuffer();
- EXPECT_FALSE(splicer_.HasNextBuffer());
-
- // Verify that the first input buffer passed through unmodified.
- EXPECT_EQ(input_1->GetTimestamp(), output_1->GetTimestamp());
- EXPECT_EQ(input_1->GetDuration(), output_1->GetDuration());
- EXPECT_EQ(input_1->GetDataSize(), output_1->GetDataSize());
- EXPECT_TRUE(VerifyData(output_1->GetData(), output_1->GetDataSize(), 1));
-
- // Verify that the second output buffer only contains
- // the samples that are in |input_3|.
- EXPECT_EQ(input_3->GetTimestamp(), output_2->GetTimestamp());
- EXPECT_EQ(input_3->GetDuration(), output_2->GetDuration());
- EXPECT_EQ(input_3->GetDataSize(), output_2->GetDataSize());
- EXPECT_TRUE(VerifyData(output_2->GetData(), output_2->GetDataSize(), 3));
-}
-
-} // namespace media
diff --git a/src/media/base/audio_timestamp_helper.cc b/src/media/base/audio_timestamp_helper.cc
deleted file mode 100644
index a3f37c4..0000000
--- a/src/media/base/audio_timestamp_helper.cc
+++ /dev/null
@@ -1,82 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "media/base/audio_timestamp_helper.h"
-
-#include "base/logging.h"
-#include "media/base/buffers.h"
-
-namespace media {
-
-AudioTimestampHelper::AudioTimestampHelper(int bytes_per_frame,
- int samples_per_second)
- : bytes_per_frame_(bytes_per_frame),
- base_timestamp_(kNoTimestamp()),
- frame_count_(0) {
- DCHECK_GT(bytes_per_frame, 0);
- DCHECK_GT(samples_per_second, 0);
- double fps = samples_per_second;
- microseconds_per_frame_ = base::Time::kMicrosecondsPerSecond / fps;
-}
-
-void AudioTimestampHelper::SetBaseTimestamp(base::TimeDelta base_timestamp) {
- base_timestamp_ = base_timestamp;
- frame_count_ = 0;
-}
-
-base::TimeDelta AudioTimestampHelper::base_timestamp() const {
- return base_timestamp_;
-}
-
-void AudioTimestampHelper::AddBytes(int byte_count) {
- DCHECK_GE(byte_count, 0);
- DCHECK(base_timestamp_ != kNoTimestamp());
- DCHECK_EQ(byte_count % bytes_per_frame_, 0);
- frame_count_ += byte_count / bytes_per_frame_;
-}
-
-base::TimeDelta AudioTimestampHelper::GetTimestamp() const {
- return ComputeTimestamp(frame_count_);
-}
-
-base::TimeDelta AudioTimestampHelper::GetDuration(int byte_count) const {
- DCHECK_GE(byte_count, 0);
- DCHECK_EQ(byte_count % bytes_per_frame_, 0);
- int frames = byte_count / bytes_per_frame_;
- base::TimeDelta end_timestamp = ComputeTimestamp(frame_count_ + frames);
- return end_timestamp - GetTimestamp();
-}
-
-int64 AudioTimestampHelper::GetBytesToTarget(
- base::TimeDelta target) const {
- DCHECK(base_timestamp_ != kNoTimestamp());
- DCHECK(target >= base_timestamp_);
-
- int64 delta_in_us = (target - GetTimestamp()).InMicroseconds();
- if (delta_in_us == 0)
- return 0;
-
- // Compute a timestamp relative to |base_timestamp_| since timestamps
- // created from |frame_count_| are computed relative to this base.
- // This ensures that the time to frame computation here is the proper inverse
- // of the frame to time computation in ComputeTimestamp().
- base::TimeDelta delta_from_base = target - base_timestamp_;
-
- // Compute frame count for the time delta. This computation rounds to
- // the nearest whole number of frames.
- double threshold = microseconds_per_frame_ / 2;
- int64 target_frame_count =
- (delta_from_base.InMicroseconds() + threshold) / microseconds_per_frame_;
- return bytes_per_frame_ * (target_frame_count - frame_count_);
-}
-
-base::TimeDelta AudioTimestampHelper::ComputeTimestamp(
- int64 frame_count) const {
- DCHECK_GE(frame_count, 0);
- DCHECK(base_timestamp_ != kNoTimestamp());
- double frames_us = microseconds_per_frame_ * frame_count;
- return base_timestamp_ + base::TimeDelta::FromMicroseconds(frames_us);
-}
-
-} // namespace media
diff --git a/src/media/base/audio_timestamp_helper.h b/src/media/base/audio_timestamp_helper.h
deleted file mode 100644
index 4b38be7..0000000
--- a/src/media/base/audio_timestamp_helper.h
+++ /dev/null
@@ -1,74 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef MEDIA_BASE_AUDIO_TIMESTAMP_HELPER_H_
-#define MEDIA_BASE_AUDIO_TIMESTAMP_HELPER_H_
-
-#include "base/time.h"
-#include "media/base/media_export.h"
-
-namespace media {
-
-// Generates timestamps for a sequence of audio sample bytes. This class should
-// be used any place timestamps need to be calculated for a sequence of audio
-// samples. It helps avoid timestamp inaccuracies caused by rounding/truncation
-// in repeated sample-count-to-timestamp conversions.
-//
-// The class is constructed with bytes per frame and samples_per_second
-// information so that it can convert audio sample byte counts into timestamps.
-// After the object is constructed, SetBaseTimestamp() must be called to specify
-// the starting timestamp of the audio sequence. As audio samples are received,
-// their byte counts are passed to AddBytes(). These byte counts are
-// accumulated by this class so GetTimestamp() can be used to determine the
-// timestamp for the samples that have been added. GetDuration() calculates
-// the proper duration values for samples added to the current timestamp.
-// GetBytesToTarget() determines the number of bytes that need to be
-// added/removed from the accumulated bytes to reach a target timestamp.
-class MEDIA_EXPORT AudioTimestampHelper {
- public:
- AudioTimestampHelper(int bytes_per_frame, int samples_per_second);
-
- // Sets the base timestamp to |base_timestamp| and resets the frame count
- // to 0.
- void SetBaseTimestamp(base::TimeDelta base_timestamp);
-
- base::TimeDelta base_timestamp() const;
-
- // Adds sample bytes to the frame counter.
- //
- // Note: SetBaseTimestamp() must be called with a value other than
- // kNoTimestamp() before this method can be called.
- void AddBytes(int byte_count);
-
- // Gets the current timestamp. This value is computed from base_timestamp()
- // and the number of sample bytes that have been added so far.
- base::TimeDelta GetTimestamp() const;
-
- // Gets the duration if |byte_count| bytes were added to the current
- // timestamp reported by GetTimestamp(). This method ensures that
- // (GetTimestamp() + GetDuration(n)) will equal the timestamp that
- // GetTimestamp() will return if AddBytes(n) is called.
- base::TimeDelta GetDuration(int byte_count) const;
-
- // Returns the number of bytes needed to reach the target timestamp.
- //
- // Note: |target| must be >= |base_timestamp_|.
- int64 GetBytesToTarget(base::TimeDelta target) const;
-
- private:
- base::TimeDelta ComputeTimestamp(int64 frame_count) const;
-
- int bytes_per_frame_;
- double microseconds_per_frame_;
-
- base::TimeDelta base_timestamp_;
-
- // Number of frames accumulated by byte counts passed to AddBytes() calls.
- int64 frame_count_;
-
- DISALLOW_IMPLICIT_CONSTRUCTORS(AudioTimestampHelper);
-};
-
-} // namespace media
-
-#endif // MEDIA_BASE_AUDIO_TIMESTAMP_HELPER_H_
diff --git a/src/media/base/audio_timestamp_helper_unittest.cc b/src/media/base/audio_timestamp_helper_unittest.cc
deleted file mode 100644
index 5f5bb4e..0000000
--- a/src/media/base/audio_timestamp_helper_unittest.cc
+++ /dev/null
@@ -1,124 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "media/base/audio_timestamp_helper.h"
-#include "media/base/buffers.h"
-#include "testing/gtest/include/gtest/gtest.h"
-
-namespace media {
-
-static const int kBytesPerFrame = 4;
-static const int kDefaultSampleRate = 44100;
-
-class AudioTimestampHelperTest : public ::testing::Test {
- public:
- AudioTimestampHelperTest()
- : helper_(kBytesPerFrame, kDefaultSampleRate) {
- helper_.SetBaseTimestamp(base::TimeDelta());
- }
-
- // Adds bytes to the helper and returns the current timestamp in microseconds.
- int64 AddBytes(int bytes) {
- helper_.AddBytes(bytes);
- return helper_.GetTimestamp().InMicroseconds();
- }
-
- int64 BytesToTarget(int target_in_microseconds) {
- return helper_.GetBytesToTarget(
- base::TimeDelta::FromMicroseconds(target_in_microseconds));
- }
-
- void TestGetBytesToTargetRange(int byte_count, int start, int end) {
- for (int i = start; i <= end; ++i)
- EXPECT_EQ(byte_count, BytesToTarget(i)) << " Failure for timestamp "
- << i << " us.";
- }
-
- protected:
- AudioTimestampHelper helper_;
-
- DISALLOW_COPY_AND_ASSIGN(AudioTimestampHelperTest);
-};
-
-TEST_F(AudioTimestampHelperTest, Basic) {
- EXPECT_EQ(0, helper_.GetTimestamp().InMicroseconds());
-
- // Verify that the output timestamp is always rounded down to the
- // nearest microsecond. 1 frame @ 44100 is ~22.67573 microseconds,
- // which is why the timestamp sometimes increments by 23 microseconds
- // and other times it increments by 22 microseconds.
- EXPECT_EQ(0, AddBytes(0));
- EXPECT_EQ(22, AddBytes(kBytesPerFrame));
- EXPECT_EQ(45, AddBytes(kBytesPerFrame));
- EXPECT_EQ(68, AddBytes(kBytesPerFrame));
- EXPECT_EQ(90, AddBytes(kBytesPerFrame));
- EXPECT_EQ(113, AddBytes(kBytesPerFrame));
-
- // Verify that adding bytes one frame at a time matches the timestamp returned
- // if the same number of bytes are added all at once.
- base::TimeDelta timestamp_1 = helper_.GetTimestamp();
- helper_.SetBaseTimestamp(kNoTimestamp());
- EXPECT_TRUE(kNoTimestamp() == helper_.base_timestamp());
- helper_.SetBaseTimestamp(base::TimeDelta());
- EXPECT_EQ(0, helper_.GetTimestamp().InMicroseconds());
-
- helper_.AddBytes(5 * kBytesPerFrame);
- EXPECT_EQ(113, helper_.GetTimestamp().InMicroseconds());
- EXPECT_TRUE(timestamp_1 == helper_.GetTimestamp());
-}
-
-
-TEST_F(AudioTimestampHelperTest, GetDuration) {
- helper_.SetBaseTimestamp(base::TimeDelta::FromMicroseconds(100));
-
- int byte_count = 5 * kBytesPerFrame;
- int64 expected_durations[] = { 113, 113, 114, 113, 113, 114 };
- for (size_t i = 0; i < arraysize(expected_durations); ++i) {
- base::TimeDelta duration = helper_.GetDuration(byte_count);
- EXPECT_EQ(expected_durations[i], duration.InMicroseconds());
-
- base::TimeDelta timestamp_1 = helper_.GetTimestamp() + duration;
- helper_.AddBytes(byte_count);
- base::TimeDelta timestamp_2 = helper_.GetTimestamp();
- EXPECT_TRUE(timestamp_1 == timestamp_2);
- }
-}
-
-TEST_F(AudioTimestampHelperTest, GetBytesToTarget) {
- // Verify GetBytesToTarget() rounding behavior.
- // 1 frame @ 44100 is ~22.67573 microseconds.
-
- // Test values less than half of the frame duration.
- TestGetBytesToTargetRange(0, 0, 11);
-
- // Test values between half the frame duration & the
- // full frame duration.
- TestGetBytesToTargetRange(kBytesPerFrame, 12, 22);
-
- // Verify that the same number of bytes is returned up
- // to the next half a frame.
- TestGetBytesToTargetRange(kBytesPerFrame, 23, 34);
-
- // Verify the next 3 ranges.
- TestGetBytesToTargetRange(2 * kBytesPerFrame, 35, 56);
- TestGetBytesToTargetRange(3 * kBytesPerFrame, 57, 79);
- TestGetBytesToTargetRange(4 * kBytesPerFrame, 80, 102);
- TestGetBytesToTargetRange(5 * kBytesPerFrame, 103, 124);
-
-
- // Add bytes to the helper so negative byte counts can
- // be tested.
- helper_.AddBytes(5 * kBytesPerFrame);
-
- // Note: The timestamp ranges must match the positive values
- // tested above to verify that the code is rounding properly.
- TestGetBytesToTargetRange(0 * kBytesPerFrame, 103, 124);
- TestGetBytesToTargetRange(-1 * kBytesPerFrame, 80, 102);
- TestGetBytesToTargetRange(-2 * kBytesPerFrame, 57, 79);
- TestGetBytesToTargetRange(-3 * kBytesPerFrame, 35, 56);
- TestGetBytesToTargetRange(-4 * kBytesPerFrame, 12, 34);
- TestGetBytesToTargetRange(-5 * kBytesPerFrame, 0, 11);
-}
-
-} // namespace media
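As a quick sanity check of the truncation behavior exercised by the Basic test above, the 22/45/68/90/113 microsecond sequence can be reproduced in a few standalone lines (plain C++, no Chromium dependencies; the constants mirror the test's 44100 Hz setup):

#include <cstdio>

int main() {
  const int kSampleRate = 44100;
  const double kMicrosecondsPerFrame = 1000000.0 / kSampleRate;  // ~22.67573
  for (int frames = 1; frames <= 5; ++frames) {
    // Truncate to whole microseconds only at the end, as the helper does.
    std::printf("%d frame(s) -> %d us\n", frames,
                static_cast<int>(frames * kMicrosecondsPerFrame));
  }
  // Prints 22, 45, 68, 90 and 113, matching AudioTimestampHelperTest.Basic.
}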
diff --git a/src/media/base/bind_to_loop.h b/src/media/base/bind_to_loop.h
deleted file mode 100644
index 9938fad..0000000
--- a/src/media/base/bind_to_loop.h
+++ /dev/null
@@ -1,172 +0,0 @@
-// This file was GENERATED by command:
-// pump.py bind_to_loop.h.pump
-// DO NOT EDIT BY HAND!!!
-
-
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef MEDIA_BASE_BIND_TO_LOOP_H_
-#define MEDIA_BASE_BIND_TO_LOOP_H_
-
-#include "base/bind.h"
-#include "base/location.h"
-#include "base/message_loop_proxy.h"
-
-// This is a helper utility for base::Bind()ing callbacks on to particular
-// MessageLoops. A typical use is when |a| (of class |A|) wants to hand a
-// callback such as base::Bind(&A::AMethod, a) to |b|, but needs to ensure that
-// when |b| executes the callback, it does so on a particular MessageLoop.
-//
-// Typical usage: request to be called back on the current thread:
-// other->StartAsyncProcessAndCallMeBack(
-// media::BindToLoop(MessageLoopProxy::current(),
-// base::Bind(&MyClass::MyMethod, this)));
-//
-// Note that like base::Bind(), BindToLoop() can't bind non-constant references,
-// and that *unlike* base::Bind(), BindToLoop() makes copies of its arguments,
-// and thus can't be used with arrays.
-
-namespace media {
-
-// Mimic base::internal::CallbackForward, replacing p.Pass() with
-// base::Passed(&p) to account for the extra layer of indirection.
-namespace internal {
-template <typename T>
-T& TrampolineForward(T& t) { return t; }
-
-template <typename T>
-base::internal::PassedWrapper<scoped_ptr<T> > TrampolineForward(
- scoped_ptr<T>& p) { return base::Passed(&p); }
-
-template <typename T>
-base::internal::PassedWrapper<scoped_array<T> > TrampolineForward(
- scoped_array<T>& p) { return base::Passed(&p); }
-
-template <typename T, typename R>
-base::internal::PassedWrapper<scoped_ptr_malloc<T, R> > TrampolineForward(
- scoped_ptr_malloc<T, R>& p) { return base::Passed(&p); }
-
-template <typename T>
-base::internal::PassedWrapper<ScopedVector<T> > TrampolineForward(
- ScopedVector<T>& p) { return base::Passed(&p); }
-
-template <typename T> struct TrampolineHelper;
-
-template <>
-struct TrampolineHelper<void()> {
- static void Run(
- const scoped_refptr<base::MessageLoopProxy>& loop,
- const base::Callback<void()>& cb) {
- loop->PostTask(FROM_HERE, base::Bind(cb));
- }
-};
-
-
-template <typename A1>
-struct TrampolineHelper<void(A1)> {
- static void Run(
- const scoped_refptr<base::MessageLoopProxy>& loop,
- const base::Callback<void(A1)>& cb, A1 a1) {
- loop->PostTask(FROM_HERE, base::Bind(cb, internal::TrampolineForward(a1)));
- }
-};
-
-
-template <typename A1, typename A2>
-struct TrampolineHelper<void(A1, A2)> {
- static void Run(
- const scoped_refptr<base::MessageLoopProxy>& loop,
- const base::Callback<void(A1, A2)>& cb, A1 a1, A2 a2) {
- loop->PostTask(FROM_HERE, base::Bind(cb, internal::TrampolineForward(a1),
- internal::TrampolineForward(a2)));
- }
-};
-
-
-template <typename A1, typename A2, typename A3>
-struct TrampolineHelper<void(A1, A2, A3)> {
- static void Run(
- const scoped_refptr<base::MessageLoopProxy>& loop,
- const base::Callback<void(A1, A2, A3)>& cb, A1 a1, A2 a2, A3 a3) {
- loop->PostTask(FROM_HERE, base::Bind(cb, internal::TrampolineForward(a1),
- internal::TrampolineForward(a2), internal::TrampolineForward(a3)));
- }
-};
-
-
-template <typename A1, typename A2, typename A3, typename A4>
-struct TrampolineHelper<void(A1, A2, A3, A4)> {
- static void Run(
- const scoped_refptr<base::MessageLoopProxy>& loop,
- const base::Callback<void(A1, A2, A3, A4)>& cb, A1 a1, A2 a2, A3 a3,
- A4 a4) {
- loop->PostTask(FROM_HERE, base::Bind(cb, internal::TrampolineForward(a1),
- internal::TrampolineForward(a2), internal::TrampolineForward(a3),
- internal::TrampolineForward(a4)));
- }
-};
-
-
-template <typename A1, typename A2, typename A3, typename A4, typename A5>
-struct TrampolineHelper<void(A1, A2, A3, A4, A5)> {
- static void Run(
- const scoped_refptr<base::MessageLoopProxy>& loop,
- const base::Callback<void(A1, A2, A3, A4, A5)>& cb, A1 a1, A2 a2, A3 a3,
- A4 a4, A5 a5) {
- loop->PostTask(FROM_HERE, base::Bind(cb, internal::TrampolineForward(a1),
- internal::TrampolineForward(a2), internal::TrampolineForward(a3),
- internal::TrampolineForward(a4), internal::TrampolineForward(a5)));
- }
-};
-
-
-template <typename A1, typename A2, typename A3, typename A4, typename A5,
- typename A6>
-struct TrampolineHelper<void(A1, A2, A3, A4, A5, A6)> {
- static void Run(
- const scoped_refptr<base::MessageLoopProxy>& loop,
- const base::Callback<void(A1, A2, A3, A4, A5, A6)>& cb, A1 a1, A2 a2,
- A3 a3, A4 a4, A5 a5, A6 a6) {
- loop->PostTask(FROM_HERE, base::Bind(cb, internal::TrampolineForward(a1),
- internal::TrampolineForward(a2), internal::TrampolineForward(a3),
- internal::TrampolineForward(a4), internal::TrampolineForward(a5),
- internal::TrampolineForward(a6)));
- }
-};
-
-
-template <typename A1, typename A2, typename A3, typename A4, typename A5,
- typename A6, typename A7>
-struct TrampolineHelper<void(A1, A2, A3, A4, A5, A6, A7)> {
- static void Run(
- const scoped_refptr<base::MessageLoopProxy>& loop,
- const base::Callback<void(A1, A2, A3, A4, A5, A6, A7)>& cb, A1 a1, A2 a2,
- A3 a3, A4 a4, A5 a5, A6 a6, A7 a7) {
- loop->PostTask(FROM_HERE, base::Bind(cb, internal::TrampolineForward(a1),
- internal::TrampolineForward(a2), internal::TrampolineForward(a3),
- internal::TrampolineForward(a4), internal::TrampolineForward(a5),
- internal::TrampolineForward(a6), internal::TrampolineForward(a7)));
- }
-};
-
-
-} // namespace internal
-
-template<typename T>
-static base::Callback<T> BindToLoop(
- const scoped_refptr<base::MessageLoopProxy>& loop,
- const base::Callback<T>& cb) {
- return base::Bind(&internal::TrampolineHelper<T>::Run, loop, cb);
-}
-
-template<typename T>
-static base::Callback<T> BindToCurrentLoop(
- const base::Callback<T>& cb) {
- return BindToLoop(base::MessageLoopProxy::current(), cb);
-}
-
-} // namespace media
-
-#endif // MEDIA_BASE_BIND_TO_LOOP_H_
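
For callers being migrated off this header, a minimal usage sketch of the BindToCurrentLoop() shorthand removed above. It assumes the old Chromium //base types this header depended on; Worker, Data, and the method names are hypothetical.

    // Hypothetical caller: wrap a member callback so that, whichever thread
    // eventually runs it, the work is re-posted to this thread's MessageLoop.
    class MyClient : public base::RefCountedThreadSafe<MyClient> {
     public:
      void Start(Worker* worker) {
        // |worker| may invoke the callback from any thread; the trampoline
        // posts OnDataReady() back to the MessageLoop current at bind time.
        worker->FetchAsync(media::BindToCurrentLoop(
            base::Bind(&MyClient::OnDataReady, this)));
      }

     private:
      friend class base::RefCountedThreadSafe<MyClient>;
      ~MyClient() {}

      void OnDataReady(scoped_ptr<Data> data) {
        // Runs on the original MessageLoop, not on the worker's thread.
      }
    };
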
diff --git a/src/media/base/bind_to_loop.h.pump b/src/media/base/bind_to_loop.h.pump
deleted file mode 100644
index 1a1ae12..0000000
--- a/src/media/base/bind_to_loop.h.pump
+++ /dev/null
@@ -1,100 +0,0 @@
-$$ This is a pump file for generating file templates. Pump is a python
-$$ script that is part of the Google Test suite of utilities. Description
-$$ can be found here:
-$$
-$$ http://code.google.com/p/googletest/wiki/PumpManual
-$$
-
-$$ See comment for MAX_ARITY in base/bind.h.pump.
-$var MAX_ARITY = 7
-
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef MEDIA_BASE_BIND_TO_LOOP_H_
-#define MEDIA_BASE_BIND_TO_LOOP_H_
-
-#include "base/bind.h"
-#include "base/location.h"
-#include "base/message_loop_proxy.h"
-
-// This is a helper utility for base::Bind()ing callbacks on to particular
-// MessageLoops. A typical use is when |a| (of class |A|) wants to hand a
-// callback such as base::Bind(&A::AMethod, a) to |b|, but needs to ensure that
-// when |b| executes the callback, it does so on a particular MessageLoop.
-//
-// Typical usage: request to be called back on the current thread:
-// other->StartAsyncProcessAndCallMeBack(
-// media::BindToLoop(MessageLoopProxy::current(),
-// base::Bind(&MyClass::MyMethod, this)));
-//
-// Note that like base::Bind(), BindToLoop() can't bind non-constant references,
-// and that *unlike* base::Bind(), BindToLoop() makes copies of its arguments,
-// and thus can't be used with arrays.
-
-namespace media {
-
-// Mimic base::internal::CallbackForward, replacing p.Pass() with
-// base::Passed(&p) to account for the extra layer of indirection.
-namespace internal {
-template <typename T>
-T& TrampolineForward(T& t) { return t; }
-
-template <typename T>
-base::internal::PassedWrapper<scoped_ptr<T> > TrampolineForward(
- scoped_ptr<T>& p) { return base::Passed(&p); }
-
-template <typename T>
-base::internal::PassedWrapper<scoped_array<T> > TrampolineForward(
- scoped_array<T>& p) { return base::Passed(&p); }
-
-template <typename T, typename R>
-base::internal::PassedWrapper<scoped_ptr_malloc<T, R> > TrampolineForward(
- scoped_ptr_malloc<T, R>& p) { return base::Passed(&p); }
-
-template <typename T>
-base::internal::PassedWrapper<ScopedVector<T> > TrampolineForward(
- ScopedVector<T>& p) { return base::Passed(&p); }
-
-template <typename T> struct TrampolineHelper;
-
-$range ARITY 0..MAX_ARITY
-$for ARITY [[
-$range ARG 1..ARITY
-
-template <$for ARG , [[typename A$(ARG)]]>
-struct TrampolineHelper<void($for ARG , [[A$(ARG)]])> {
- static void Run(
- const scoped_refptr<base::MessageLoopProxy>& loop,
- const base::Callback<void($for ARG , [[A$(ARG)]])>& cb
-$if ARITY != 0 [[, ]]
-$for ARG , [[A$(ARG) a$(ARG)]]
-) {
- loop->PostTask(FROM_HERE, base::Bind(cb
-$if ARITY != 0 [[, ]]
-$for ARG , [[internal::TrampolineForward(a$(ARG))]]));
- }
-};
-
-
-]] $$ for ARITY
-
-} // namespace internal
-
-template<typename T>
-static base::Callback<T> BindToLoop(
- const scoped_refptr<base::MessageLoopProxy>& loop,
- const base::Callback<T>& cb) {
- return base::Bind(&internal::TrampolineHelper<T>::Run, loop, cb);
-}
-
-template<typename T>
-static base::Callback<T> BindToCurrentLoop(
- const base::Callback<T>& cb) {
- return BindToLoop(base::MessageLoopProxy::current(), cb);
-}
-
-} // namespace media
-
-#endif // MEDIA_BASE_BIND_TO_LOOP_H_
diff --git a/src/media/base/bind_to_loop_unittest.cc b/src/media/base/bind_to_loop_unittest.cc
deleted file mode 100644
index 214147d..0000000
--- a/src/media/base/bind_to_loop_unittest.cc
+++ /dev/null
@@ -1,169 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "media/base/bind_to_loop.h"
-
-#include "base/message_loop.h"
-#include "base/synchronization/waitable_event.h"
-#include "testing/gtest/include/gtest/gtest.h"
-
-namespace media {
-
-void BoundBoolSet(bool* var, bool val) {
- *var = val;
-}
-
-void BoundBoolSetFromScopedPtr(bool* var, scoped_ptr<bool> val) {
- *var = *val;
-}
-
-void BoundBoolSetFromScopedPtrMalloc(bool* var, scoped_ptr_malloc<bool> val) {
- *var = val;
-}
-
-void BoundBoolSetFromScopedArray(bool* var, scoped_array<bool> val) {
- *var = val[0];
-}
-
-void BoundBoolSetFromConstRef(bool* var, const bool& val) {
- *var = val;
-}
-
-void BoundIntegersSet(int* a_var, int* b_var, int a_val, int b_val) {
- *a_var = a_val;
- *b_var = b_val;
-}
-
-// Various tests that check that the bound function is only actually executed
-// on the message loop, not during the original Run.
-class BindToLoopTest : public ::testing::Test {
- public:
- BindToLoopTest() : proxy_(loop_.message_loop_proxy()) {}
-
- protected:
- MessageLoop loop_;
- scoped_refptr<base::MessageLoopProxy> proxy_;
-};
-
-TEST_F(BindToLoopTest, Closure) {
- // Test the closure is run inside the loop, not outside it.
- base::WaitableEvent waiter(false, false);
- base::Closure cb = BindToLoop(proxy_, base::Bind(
- &base::WaitableEvent::Signal, base::Unretained(&waiter)));
- cb.Run();
- EXPECT_FALSE(waiter.IsSignaled());
- loop_.RunUntilIdle();
- EXPECT_TRUE(waiter.IsSignaled());
-}
-
-TEST_F(BindToLoopTest, Bool) {
- bool bool_var = false;
- base::Callback<void(bool)> cb = BindToLoop(proxy_, base::Bind(
- &BoundBoolSet, &bool_var));
- cb.Run(true);
- EXPECT_FALSE(bool_var);
- loop_.RunUntilIdle();
- EXPECT_TRUE(bool_var);
-}
-
-TEST_F(BindToLoopTest, BoundScopedPtrBool) {
- bool bool_val = false;
- scoped_ptr<bool> scoped_ptr_bool(new bool(true));
- base::Closure cb = BindToLoop(proxy_, base::Bind(
- &BoundBoolSetFromScopedPtr, &bool_val, base::Passed(&scoped_ptr_bool)));
- cb.Run();
- EXPECT_FALSE(bool_val);
- loop_.RunUntilIdle();
- EXPECT_TRUE(bool_val);
-}
-
-TEST_F(BindToLoopTest, PassedScopedPtrBool) {
- bool bool_val = false;
- scoped_ptr<bool> scoped_ptr_bool(new bool(true));
- base::Callback<void(scoped_ptr<bool>)> cb = BindToLoop(proxy_, base::Bind(
- &BoundBoolSetFromScopedPtr, &bool_val));
- cb.Run(scoped_ptr_bool.Pass());
- EXPECT_FALSE(bool_val);
- loop_.RunUntilIdle();
- EXPECT_TRUE(bool_val);
-}
-
-TEST_F(BindToLoopTest, BoundScopedArrayBool) {
- bool bool_val = false;
- scoped_array<bool> scoped_array_bool(new bool[1]);
- scoped_array_bool[0] = true;
- base::Closure cb = BindToLoop(proxy_, base::Bind(
- &BoundBoolSetFromScopedArray, &bool_val,
- base::Passed(&scoped_array_bool)));
- cb.Run();
- EXPECT_FALSE(bool_val);
- loop_.RunUntilIdle();
- EXPECT_TRUE(bool_val);
-}
-
-TEST_F(BindToLoopTest, PassedScopedArrayBool) {
- bool bool_val = false;
- scoped_array<bool> scoped_array_bool(new bool[1]);
- scoped_array_bool[0] = true;
- base::Callback<void(scoped_array<bool>)> cb = BindToLoop(proxy_, base::Bind(
- &BoundBoolSetFromScopedArray, &bool_val));
- cb.Run(scoped_array_bool.Pass());
- EXPECT_FALSE(bool_val);
- loop_.RunUntilIdle();
- EXPECT_TRUE(bool_val);
-}
-
-TEST_F(BindToLoopTest, BoundScopedPtrMallocBool) {
- bool bool_val = false;
- scoped_ptr_malloc<bool> scoped_ptr_malloc_bool(
- static_cast<bool*>(malloc(sizeof(bool))));
- *scoped_ptr_malloc_bool = true;
- base::Closure cb = BindToLoop(proxy_, base::Bind(
- &BoundBoolSetFromScopedPtrMalloc, &bool_val,
- base::Passed(&scoped_ptr_malloc_bool)));
- cb.Run();
- EXPECT_FALSE(bool_val);
- loop_.RunUntilIdle();
- EXPECT_TRUE(bool_val);
-}
-
-TEST_F(BindToLoopTest, PassedScopedPtrMallocBool) {
- bool bool_val = false;
- scoped_ptr_malloc<bool> scoped_ptr_malloc_bool(
- static_cast<bool*>(malloc(sizeof(bool))));
- *scoped_ptr_malloc_bool = true;
- base::Callback<void(scoped_ptr_malloc<bool>)> cb = BindToLoop(
- proxy_, base::Bind(&BoundBoolSetFromScopedPtrMalloc, &bool_val));
- cb.Run(scoped_ptr_malloc_bool.Pass());
- EXPECT_FALSE(bool_val);
- loop_.RunUntilIdle();
- EXPECT_TRUE(bool_val);
-}
-
-TEST_F(BindToLoopTest, BoolConstRef) {
- bool bool_var = false;
- bool true_var = true;
- const bool& true_ref = true_var;
- base::Closure cb = BindToLoop(proxy_, base::Bind(
- &BoundBoolSetFromConstRef, &bool_var, true_ref));
- cb.Run();
- EXPECT_FALSE(bool_var);
- loop_.RunUntilIdle();
- EXPECT_TRUE(bool_var);
-}
-
-TEST_F(BindToLoopTest, Integers) {
- int a = 0;
- int b = 0;
- base::Callback<void(int, int)> cb = BindToLoop(proxy_, base::Bind(
- &BoundIntegersSet, &a, &b));
- cb.Run(1, -1);
- EXPECT_EQ(a, 0);
- EXPECT_EQ(b, 0);
- loop_.RunUntilIdle();
- EXPECT_EQ(a, 1);
- EXPECT_EQ(b, -1);
-}
-
-} // namespace media
diff --git a/src/media/base/bit_reader.cc b/src/media/base/bit_reader.cc
deleted file mode 100644
index 2ddb8d4..0000000
--- a/src/media/base/bit_reader.cc
+++ /dev/null
@@ -1,52 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "media/base/bit_reader.h"
-
-namespace media {
-
-BitReader::BitReader(const uint8* data, off_t size)
- : data_(data), bytes_left_(size), num_remaining_bits_in_curr_byte_(0) {
- DCHECK(data_ != NULL && bytes_left_ > 0);
-
- UpdateCurrByte();
-}
-
-BitReader::~BitReader() {}
-
-bool BitReader::ReadBitsInternal(int num_bits, uint64* out) {
- DCHECK_LE(num_bits, 64);
-
- *out = 0;
-
- while (num_remaining_bits_in_curr_byte_ != 0 && num_bits != 0) {
- int bits_to_take = std::min(num_remaining_bits_in_curr_byte_, num_bits);
-
- *out <<= bits_to_take;
- *out += curr_byte_ >> (num_remaining_bits_in_curr_byte_ - bits_to_take);
- num_bits -= bits_to_take;
- num_remaining_bits_in_curr_byte_ -= bits_to_take;
- curr_byte_ &= (1 << num_remaining_bits_in_curr_byte_) - 1;
-
- if (num_remaining_bits_in_curr_byte_ == 0)
- UpdateCurrByte();
- }
-
- return num_bits == 0;
-}
-
-void BitReader::UpdateCurrByte() {
- DCHECK_EQ(num_remaining_bits_in_curr_byte_, 0);
-
- if (bytes_left_ == 0)
- return;
-
- // Load a new byte and advance pointers.
- curr_byte_ = *data_;
- ++data_;
- --bytes_left_;
- num_remaining_bits_in_curr_byte_ = 8;
-}
-
-} // namespace media
diff --git a/src/media/base/bit_reader.h b/src/media/base/bit_reader.h
deleted file mode 100644
index 1becf91..0000000
--- a/src/media/base/bit_reader.h
+++ /dev/null
@@ -1,68 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef MEDIA_BASE_BIT_READER_H_
-#define MEDIA_BASE_BIT_READER_H_
-
-#include <sys/types.h>
-
-#include "base/basictypes.h"
-#include "base/logging.h"
-#include "media/base/media_export.h"
-
-namespace media {
-
-// A class to read bit streams.
-class MEDIA_EXPORT BitReader {
- public:
- // Initialize the reader to start reading at |data|, |size| being size
- // of |data| in bytes.
- BitReader(const uint8* data, off_t size);
- ~BitReader();
-
-  // Reads the next |num_bits| bits from the stream and returns them,
-  // right-aligned, in |*out|; the first bit read from the stream ends up as
-  // the most significant of the |num_bits| returned bits.
-  // |num_bits| cannot be larger than the number of bits the type can hold.
-  // Returns false if the given number of bits cannot be read (not enough
-  // bits in the stream), true otherwise. Once false is returned, the stream
-  // enters a state where further ReadBits/SkipBits operations will always
-  // return false unless |num_bits| is 0. The type |T| has to be a primitive
-  // integer type.
- template<typename T> bool ReadBits(int num_bits, T *out) {
- DCHECK_LE(num_bits, static_cast<int>(sizeof(T) * 8));
- uint64 temp;
- bool ret = ReadBitsInternal(num_bits, &temp);
- *out = static_cast<T>(temp);
- return ret;
- }
-
- private:
-  // Helper function used by ReadBits() to avoid inlining the bit reading logic.
- bool ReadBitsInternal(int num_bits, uint64* out);
-
- // Advance to the next byte, loading it into curr_byte_.
- // If the num_remaining_bits_in_curr_byte_ is 0 after this function returns,
- // the stream has reached the end.
- void UpdateCurrByte();
-
- // Pointer to the next unread (not in curr_byte_) byte in the stream.
- const uint8* data_;
-
- // Bytes left in the stream (without the curr_byte_).
- off_t bytes_left_;
-
- // Contents of the current byte; first unread bit starting at position
- // 8 - num_remaining_bits_in_curr_byte_ from MSB.
- uint8 curr_byte_;
-
- // Number of bits remaining in curr_byte_
- int num_remaining_bits_in_curr_byte_;
-
- private:
- DISALLOW_COPY_AND_ASSIGN(BitReader);
-};
-
-} // namespace media
-
-#endif // MEDIA_BASE_BIT_READER_H_
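
For orientation, a short sketch of the ReadBits() contract described above, using the same byte pattern as the unit test that follows (it assumes the //base integer typedefs the header uses):

    const uint8 buffer[] = {0x55, 0x99};  // 0101 0101 1001 1001
    media::BitReader reader(buffer, sizeof(buffer));

    uint8 bit, byte, rest, dummy;
    reader.ReadBits(1, &bit);    // bit  == 0    (MSB of 0x55)
    reader.ReadBits(8, &byte);   // byte == 0xab (1010 1011, spans both bytes)
    reader.ReadBits(7, &rest);   // rest == 0x19 (the remaining 7 bits)
    bool ok = reader.ReadBits(1, &dummy);  // ok == false: stream exhausted
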
diff --git a/src/media/base/bit_reader_unittest.cc b/src/media/base/bit_reader_unittest.cc
deleted file mode 100644
index 48e8c5e..0000000
--- a/src/media/base/bit_reader_unittest.cc
+++ /dev/null
@@ -1,48 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "media/base/bit_reader.h"
-
-#include "testing/gtest/include/gtest/gtest.h"
-
-namespace media {
-
-TEST(BitReaderTest, NormalOperationTest) {
- uint8 value8;
- uint64 value64;
- // 0101 0101 1001 1001 repeats 4 times
- uint8 buffer[] = {0x55, 0x99, 0x55, 0x99, 0x55, 0x99, 0x55, 0x99};
- BitReader reader1(buffer, 6); // Initialize with 6 bytes only
-
- EXPECT_TRUE(reader1.ReadBits(1, &value8));
- EXPECT_EQ(value8, 0);
- EXPECT_TRUE(reader1.ReadBits(8, &value8));
- EXPECT_EQ(value8, 0xab); // 1010 1011
- EXPECT_TRUE(reader1.ReadBits(7, &value64));
- EXPECT_TRUE(reader1.ReadBits(32, &value64));
- EXPECT_EQ(value64, 0x55995599u);
- EXPECT_FALSE(reader1.ReadBits(1, &value8));
- value8 = 0xff;
- EXPECT_TRUE(reader1.ReadBits(0, &value8));
- EXPECT_EQ(value8, 0);
-
- BitReader reader2(buffer, 8);
- EXPECT_TRUE(reader2.ReadBits(64, &value64));
- EXPECT_EQ(value64, 0x5599559955995599ull);
- EXPECT_FALSE(reader2.ReadBits(1, &value8));
- EXPECT_TRUE(reader2.ReadBits(0, &value8));
-}
-
-TEST(BitReaderTest, ReadBeyondEndTest) {
- uint8 value8;
- uint8 buffer[] = {0x12};
- BitReader reader1(buffer, sizeof(buffer));
-
- EXPECT_TRUE(reader1.ReadBits(4, &value8));
- EXPECT_FALSE(reader1.ReadBits(5, &value8));
- EXPECT_FALSE(reader1.ReadBits(1, &value8));
- EXPECT_TRUE(reader1.ReadBits(0, &value8));
-}
-
-} // namespace media
diff --git a/src/media/base/bitstream_buffer.h b/src/media/base/bitstream_buffer.h
deleted file mode 100644
index b7ff3d7..0000000
--- a/src/media/base/bitstream_buffer.h
+++ /dev/null
@@ -1,37 +0,0 @@
-// Copyright (c) 2011 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef MEDIA_BASE_BITSTREAM_BUFFER_H_
-#define MEDIA_BASE_BITSTREAM_BUFFER_H_
-
-#include "base/basictypes.h"
-#include "base/shared_memory.h"
-
-namespace media {
-
-// Class for passing bitstream buffers around. Does not take ownership of the
-// data. This is the media-namespace equivalent of PP_VideoBitstreamBuffer_Dev.
-class BitstreamBuffer {
- public:
- BitstreamBuffer(int32 id, base::SharedMemoryHandle handle, size_t size)
- : id_(id),
- handle_(handle),
- size_(size) {
- }
-
- int32 id() const { return id_; }
- base::SharedMemoryHandle handle() const { return handle_; }
- size_t size() const { return size_; }
-
- private:
- int32 id_;
- base::SharedMemoryHandle handle_;
- size_t size_;
-
- // Allow compiler-generated copy & assign constructors.
-};
-
-} // namespace media
-
-#endif // MEDIA_BASE_BITSTREAM_BUFFER_H_
diff --git a/src/media/base/buffers.cc b/src/media/base/buffers.cc
deleted file mode 100644
index 63802c6..0000000
--- a/src/media/base/buffers.cc
+++ /dev/null
@@ -1,20 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "media/base/buffers.h"
-
-namespace media {
-
-Buffer::Buffer(base::TimeDelta timestamp, base::TimeDelta duration)
- : timestamp_(timestamp),
- duration_(duration) {
-}
-
-Buffer::~Buffer() {}
-
-bool Buffer::IsEndOfStream() const {
- return GetData() == NULL;
-}
-
-} // namespace media
diff --git a/src/media/base/buffers.h b/src/media/base/buffers.h
deleted file mode 100644
index d14f4db..0000000
--- a/src/media/base/buffers.h
+++ /dev/null
@@ -1,85 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// Defines a base class for representing timestamped media data. Every buffer
-// contains a timestamp in microseconds describing the relative position of
-// the buffer within the media stream, and the duration in microseconds for
-// the length of time the buffer will be rendered.
-//
-// Timestamps are derived directly from the encoded media file and are commonly
-// known as the presentation timestamp (PTS). Durations are a best-guess and
-// are usually derived from the sample/frame rate of the media file.
-//
-// Due to encoding and transmission errors, it is not guaranteed that timestamps
-// arrive in a monotonically increasing order nor that the next timestamp will
-// be equal to the previous timestamp plus the duration.
-//
-// In the ideal scenario for a 25fps movie, buffers are timestamped as follows:
-//
-// Buffer0 Buffer1 Buffer2 ... BufferN
-// Timestamp: 0us 40000us 80000us ... (N*40000)us
-// Duration*: 40000us 40000us 40000us ... 40000us
-//
-// *25fps = 0.04s per frame = 40000us per frame
-
-#ifndef MEDIA_BASE_BUFFERS_H_
-#define MEDIA_BASE_BUFFERS_H_
-
-#include "base/basictypes.h"
-#include "base/memory/ref_counted.h"
-#include "base/time.h"
-#include "media/base/media_export.h"
-
-namespace media {
-
-// Indicates an invalid or missing timestamp.
-MEDIA_EXPORT extern inline base::TimeDelta kNoTimestamp() {
- return base::TimeDelta::FromMicroseconds(kint64min);
-}
-
-// Represents an infinite stream duration.
-MEDIA_EXPORT extern inline base::TimeDelta kInfiniteDuration() {
- return base::TimeDelta::FromMicroseconds(kint64max);
-}
-
-class MEDIA_EXPORT Buffer : public base::RefCountedThreadSafe<Buffer> {
- public:
- // Returns a read only pointer to the buffer data.
- virtual const uint8* GetData() const = 0;
-
- // Returns the size of valid data in bytes.
- virtual int GetDataSize() const = 0;
-
- // If there's no data in this buffer, it represents end of stream.
- bool IsEndOfStream() const;
-
- base::TimeDelta GetTimestamp() const {
- return timestamp_;
- }
- void SetTimestamp(const base::TimeDelta& timestamp) {
- timestamp_ = timestamp;
- }
-
- base::TimeDelta GetDuration() const {
- return duration_;
- }
- void SetDuration(const base::TimeDelta& duration) {
- duration_ = duration;
- }
-
- protected:
- friend class base::RefCountedThreadSafe<Buffer>;
- Buffer(base::TimeDelta timestamp, base::TimeDelta duration);
- virtual ~Buffer();
-
- private:
- base::TimeDelta timestamp_;
- base::TimeDelta duration_;
-
- DISALLOW_COPY_AND_ASSIGN(Buffer);
-};
-
-} // namespace media
-
-#endif // MEDIA_BASE_BUFFERS_H_
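
The 25 fps arithmetic from the header comment above, written out as a small sketch. StampFrame is a hypothetical helper, |buffer| is any concrete media::Buffer subclass, and base::TimeDelta's integer multiply is assumed.

    // Stamp frame |n| of a 25 fps stream: each frame lasts 1s / 25 = 40000us,
    // so frame n gets timestamp n * 40000us and duration 40000us.
    void StampFrame(const scoped_refptr<media::Buffer>& buffer, int n) {
      const base::TimeDelta kFrameDuration =
          base::TimeDelta::FromMicroseconds(40000);
      buffer->SetTimestamp(kFrameDuration * n);
      buffer->SetDuration(kFrameDuration);
    }
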
diff --git a/src/media/base/buffers_unittest.cc b/src/media/base/buffers_unittest.cc
deleted file mode 100644
index a96b40b..0000000
--- a/src/media/base/buffers_unittest.cc
+++ /dev/null
@@ -1,85 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/string_util.h"
-#include "media/base/buffers.h"
-#include "testing/gtest/include/gtest/gtest.h"
-
-namespace media {
-
-namespace {
-
-// Simple implementation of Buffer to test base class functionality.
-class TestBuffer : public Buffer {
- public:
- TestBuffer()
- : Buffer(base::TimeDelta(), base::TimeDelta()) {
- }
-
- // Sets |data_| and |size_| members for testing purposes. Does not take
- // ownership of |data|.
- TestBuffer(const uint8* data, int size)
- : Buffer(base::TimeDelta(), base::TimeDelta()),
- data_(data),
- size_(size) {
- }
-
- // Buffer implementation.
- virtual const uint8* GetData() const OVERRIDE { return data_; }
- virtual int GetDataSize() const OVERRIDE { return size_; }
-
- protected:
- virtual ~TestBuffer() {}
-
- private:
- const uint8* data_;
- int size_;
-
- DISALLOW_COPY_AND_ASSIGN(TestBuffer);
-};
-
-} // namespace
-
-TEST(BufferTest, Timestamp) {
- const base::TimeDelta kZero;
- const base::TimeDelta kTimestampA = base::TimeDelta::FromMicroseconds(1337);
- const base::TimeDelta kTimestampB = base::TimeDelta::FromMicroseconds(1234);
-
- scoped_refptr<TestBuffer> buffer = new TestBuffer();
- EXPECT_TRUE(buffer->GetTimestamp() == kZero);
-
- buffer->SetTimestamp(kTimestampA);
- EXPECT_TRUE(buffer->GetTimestamp() == kTimestampA);
-
- buffer->SetTimestamp(kTimestampB);
- EXPECT_TRUE(buffer->GetTimestamp() == kTimestampB);
-}
-
-TEST(BufferTest, Duration) {
- const base::TimeDelta kZero;
- const base::TimeDelta kDurationA = base::TimeDelta::FromMicroseconds(1337);
- const base::TimeDelta kDurationB = base::TimeDelta::FromMicroseconds(1234);
-
- scoped_refptr<TestBuffer> buffer = new TestBuffer();
- EXPECT_TRUE(buffer->GetDuration() == kZero);
-
- buffer->SetDuration(kDurationA);
- EXPECT_TRUE(buffer->GetDuration() == kDurationA);
-
- buffer->SetDuration(kDurationB);
- EXPECT_TRUE(buffer->GetDuration() == kDurationB);
-}
-
-TEST(BufferTest, IsEndOfStream) {
- const uint8 kData[] = { 0x00, 0xFF };
- const int kDataSize = arraysize(kData);
-
- scoped_refptr<TestBuffer> buffer = new TestBuffer(NULL, 0);
- EXPECT_TRUE(buffer->IsEndOfStream());
-
- buffer = new TestBuffer(kData, kDataSize);
- EXPECT_FALSE(buffer->IsEndOfStream());
-}
-
-} // namespace media
diff --git a/src/media/base/byte_queue.cc b/src/media/base/byte_queue.cc
deleted file mode 100644
index e91bfb7..0000000
--- a/src/media/base/byte_queue.cc
+++ /dev/null
@@ -1,84 +0,0 @@
-// Copyright (c) 2011 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "media/base/byte_queue.h"
-
-#include "base/logging.h"
-
-namespace media {
-
-// Default starting size for the queue.
-enum { kDefaultQueueSize = 1024 };
-
-ByteQueue::ByteQueue()
- : buffer_(new uint8[kDefaultQueueSize]),
- size_(kDefaultQueueSize),
- offset_(0),
- used_(0) {
-}
-
-ByteQueue::~ByteQueue() {}
-
-void ByteQueue::Reset() {
- offset_ = 0;
- used_ = 0;
-}
-
-void ByteQueue::Push(const uint8* data, int size) {
- DCHECK(data);
- DCHECK_GT(size, 0);
-
- size_t size_needed = used_ + size;
-
- // Check to see if we need a bigger buffer.
- if (size_needed > size_) {
- size_t new_size = 2 * size_;
- while (size_needed > new_size && new_size > size_)
- new_size *= 2;
-
- // Sanity check to make sure we didn't overflow.
- CHECK_GT(new_size, size_);
-
- scoped_array<uint8> new_buffer(new uint8[new_size]);
-
- // Copy the data from the old buffer to the start of the new one.
- if (used_ > 0)
- memcpy(new_buffer.get(), front(), used_);
-
- buffer_.reset(new_buffer.release());
- size_ = new_size;
- offset_ = 0;
- } else if ((offset_ + used_ + size) > size_) {
- // The buffer is big enough, but we need to move the data in the queue.
- memmove(buffer_.get(), front(), used_);
- offset_ = 0;
- }
-
- memcpy(front() + used_, data, size);
- used_ += size;
-}
-
-void ByteQueue::Peek(const uint8** data, int* size) const {
- DCHECK(data);
- DCHECK(size);
- *data = front();
- *size = used_;
-}
-
-void ByteQueue::Pop(int count) {
- DCHECK_LE(count, used_);
-
- offset_ += count;
- used_ -= count;
-
- // Move the offset back to 0 if we have reached the end of the buffer.
- if (offset_ == size_) {
- DCHECK_EQ(used_, 0);
- offset_ = 0;
- }
-}
-
-uint8* ByteQueue::front() const { return buffer_.get() + offset_; }
-
-} // namespace media
diff --git a/src/media/base/byte_queue.h b/src/media/base/byte_queue.h
deleted file mode 100644
index 7619472..0000000
--- a/src/media/base/byte_queue.h
+++ /dev/null
@@ -1,58 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef MEDIA_BASE_BYTE_QUEUE_H_
-#define MEDIA_BASE_BYTE_QUEUE_H_
-
-#include "base/basictypes.h"
-#include "base/memory/scoped_ptr.h"
-#include "media/base/media_export.h"
-
-namespace media {
-
-// Represents a queue of bytes.
-// Data is added to the end of the queue via a Push() call and removed via
-// Pop(). The contents of the queue can be observed via the Peek() method.
-// This class manages the underlying storage of the queue and tries to minimize
-// the number of buffer copies when data is appended and removed.
-class MEDIA_EXPORT ByteQueue {
- public:
- ByteQueue();
- ~ByteQueue();
-
- // Reset the queue to the empty state.
- void Reset();
-
- // Appends new bytes onto the end of the queue.
- void Push(const uint8* data, int size);
-
- // Get a pointer to the front of the queue and the queue size.
- // These values are only valid until the next Push() or
- // Pop() call.
- void Peek(const uint8** data, int* size) const;
-
- // Remove |count| bytes from the front of the queue.
- void Pop(int count);
-
- private:
- // Returns a pointer to the front of the queue.
- uint8* front() const;
-
- scoped_array<uint8> buffer_;
-
- // Size of |buffer_|.
- size_t size_;
-
- // Offset from the start of |buffer_| that marks the front of the queue.
- size_t offset_;
-
- // Number of bytes stored in the queue.
- int used_;
-
- DISALLOW_COPY_AND_ASSIGN(ByteQueue);
-};
-
-} // namespace media
-
-#endif // MEDIA_BASE_BYTE_QUEUE_H_
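
A brief sketch of the Push()/Peek()/Pop() contract described above (the byte values are arbitrary):

    media::ByteQueue queue;
    const uint8 kInput[] = {0xDE, 0xAD, 0xBE, 0xEF};
    queue.Push(kInput, sizeof(kInput));

    const uint8* data;
    int size;
    queue.Peek(&data, &size);  // data -> the 4 queued bytes, size == 4
    queue.Pop(2);              // discard the 2 bytes a parser consumed
    queue.Peek(&data, &size);  // size == 2, data now starts at 0xBE
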
diff --git a/src/media/base/channel_layout.cc b/src/media/base/channel_layout.cc
deleted file mode 100644
index 8a442b3..0000000
--- a/src/media/base/channel_layout.cc
+++ /dev/null
@@ -1,183 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "media/base/channel_layout.h"
-
-#include "base/basictypes.h"
-#include "base/logging.h"
-
-namespace media {
-
-static const int kLayoutToChannels[] = {
- 0, // CHANNEL_LAYOUT_NONE
- 0, // CHANNEL_LAYOUT_UNSUPPORTED
- 1, // CHANNEL_LAYOUT_MONO
- 2, // CHANNEL_LAYOUT_STEREO
- 3, // CHANNEL_LAYOUT_2_1
- 3, // CHANNEL_LAYOUT_SURROUND
- 4, // CHANNEL_LAYOUT_4POINT0
- 4, // CHANNEL_LAYOUT_2_2
- 4, // CHANNEL_LAYOUT_QUAD
- 5, // CHANNEL_LAYOUT_5POINT0
- 6, // CHANNEL_LAYOUT_5POINT1
- 5, // CHANNEL_LAYOUT_5POINT0_BACK
- 6, // CHANNEL_LAYOUT_5POINT1_BACK
- 7, // CHANNEL_LAYOUT_7POINT0
- 8, // CHANNEL_LAYOUT_7POINT1
- 8, // CHANNEL_LAYOUT_7POINT1_WIDE
- 2, // CHANNEL_LAYOUT_STEREO_DOWNMIX
- 3, // CHANNEL_LAYOUT_2POINT1
- 4, // CHANNEL_LAYOUT_3_1
- 5, // CHANNEL_LAYOUT_4_1
- 6, // CHANNEL_LAYOUT_6_0
- 6, // CHANNEL_LAYOUT_6_0_FRONT
- 6, // CHANNEL_LAYOUT_HEXAGONAL
- 7, // CHANNEL_LAYOUT_6_1
- 7, // CHANNEL_LAYOUT_6_1_BACK
- 7, // CHANNEL_LAYOUT_6_1_FRONT
- 7, // CHANNEL_LAYOUT_7_0_FRONT
- 8, // CHANNEL_LAYOUT_7_1_WIDE_BACK
- 8, // CHANNEL_LAYOUT_OCTAGONAL
-};
-
-// The channel orderings for each layout as specified by FFmpeg. Each value
-// represents the index of each channel in each layout. Values of -1 mean the
-// channel at that index is not used for that layout. For example, the left side
-// surround sound channel in FFmpeg's 5.1 layout is in the 5th position (because
-// the order is L, R, C, LFE, LS, RS), so
-// kChannelOrderings[CHANNEL_LAYOUT_5POINT1][SIDE_LEFT] = 4;
-static const int kChannelOrderings[CHANNEL_LAYOUT_MAX][CHANNELS_MAX] = {
- // FL | FR | FC | LFE | BL | BR | FLofC | FRofC | BC | SL | SR
-
- // CHANNEL_LAYOUT_NONE
- { -1 , -1 , -1 , -1 , -1 , -1 , -1 , -1 , -1 , -1 , -1 },
-
- // CHANNEL_LAYOUT_UNSUPPORTED
- { -1 , -1 , -1 , -1 , -1 , -1 , -1 , -1 , -1 , -1 , -1 },
-
- // CHANNEL_LAYOUT_MONO
- { -1 , -1 , 0 , -1 , -1 , -1 , -1 , -1 , -1 , -1 , -1 },
-
- // CHANNEL_LAYOUT_STEREO
- { 0 , 1 , -1 , -1 , -1 , -1 , -1 , -1 , -1 , -1 , -1 },
-
- // CHANNEL_LAYOUT_2_1
- { 0 , 1 , -1 , -1 , -1 , -1 , -1 , -1 , 2 , -1 , -1 },
-
- // CHANNEL_LAYOUT_SURROUND
- { 0 , 1 , 2 , -1 , -1 , -1 , -1 , -1 , -1 , -1 , -1 },
-
- // CHANNEL_LAYOUT_4POINT0
- { 0 , 1 , 2 , -1 , -1 , -1 , -1 , -1 , 3 , -1 , -1 },
-
- // CHANNEL_LAYOUT_2_2
- { 0 , 1 , -1 , -1 , -1 , -1 , -1 , -1 , -1 , 2 , 3 },
-
- // CHANNEL_LAYOUT_QUAD
- { 0 , 1 , -1 , -1 , 2 , 3 , -1 , -1 , -1 , -1 , -1 },
-
- // CHANNEL_LAYOUT_5POINT0
- { 0 , 1 , 2 , -1 , -1 , -1 , -1 , -1 , -1 , 3 , 4 },
-
- // CHANNEL_LAYOUT_5POINT1
- { 0 , 1 , 2 , 3 , -1 , -1 , -1 , -1 , -1 , 4 , 5 },
-
- // FL | FR | FC | LFE | BL | BR | FLofC | FRofC | BC | SL | SR
-
- // CHANNEL_LAYOUT_5POINT0_BACK
- { 0 , 1 , 2 , -1 , 3 , 4 , -1 , -1 , -1 , -1 , -1 },
-
- // CHANNEL_LAYOUT_5POINT1_BACK
- { 0 , 1 , 2 , 3 , 4 , 5 , -1 , -1 , -1 , -1 , -1 },
-
- // CHANNEL_LAYOUT_7POINT0
- { 0 , 1 , 2 , -1 , 5 , 6 , -1 , -1 , -1 , 3 , 4 },
-
- // CHANNEL_LAYOUT_7POINT1
- { 0 , 1 , 2 , 3 , 6 , 7 , -1 , -1 , -1 , 4 , 5 },
-
- // CHANNEL_LAYOUT_7POINT1_WIDE
- { 0 , 1 , 2 , 3 , -1 , -1 , 6 , 7 , -1 , 4 , 5 },
-
- // CHANNEL_LAYOUT_STEREO_DOWNMIX
- { 0 , 1 , -1 , -1 , -1 , -1 , -1 , -1 , -1 , -1 , -1 },
-
- // CHANNEL_LAYOUT_2POINT1
- { 0 , 1 , -1 , 2 , -1 , -1 , -1 , -1 , -1 , -1 , -1 },
-
- // CHANNEL_LAYOUT_3_1
- { 0 , 1 , 2 , 3 , -1 , -1 , -1 , -1 , -1 , -1 , -1 },
-
- // CHANNEL_LAYOUT_4_1
- { 0 , 1 , 2 , 4 , -1 , -1 , -1 , -1 , 3 , -1 , -1 },
-
- // CHANNEL_LAYOUT_6_0
- { 0 , 1 , 2 , -1 , -1 , -1 , -1 , -1 , 5 , 3 , 4 },
-
- // CHANNEL_LAYOUT_6_0_FRONT
- { 0 , 1 , -1 , -1 , -1 , -1 , 4 , 5 , -1 , 2 , 3 },
-
- // FL | FR | FC | LFE | BL | BR | FLofC | FRofC | BC | SL | SR
-
- // CHANNEL_LAYOUT_HEXAGONAL
- { 0 , 1 , 2 , -1 , 3 , 4 , -1 , -1 , 5 , -1 , -1 },
-
- // CHANNEL_LAYOUT_6_1
- { 0 , 1 , 2 , 3 , -1 , -1 , -1 , -1 , 6 , 4 , 5 },
-
- // CHANNEL_LAYOUT_6_1_BACK
- { 0 , 1 , 2 , 3 , 4 , 5 , -1 , -1 , 6 , -1 , -1 },
-
- // CHANNEL_LAYOUT_6_1_FRONT
- { 0 , 1 , -1 , 6 , -1 , -1 , 4 , 5 , -1 , 2 , 3 },
-
- // CHANNEL_LAYOUT_7_0_FRONT
- { 0 , 1 , 2 , -1 , -1 , -1 , 5 , 6 , -1 , 3 , 4 },
-
- // CHANNEL_LAYOUT_7_1_WIDE_BACK
- { 0 , 1 , 2 , 3 , 4 , 5 , 6 , 7 , -1 , -1 , -1 },
-
- // CHANNEL_LAYOUT_OCTAGONAL
- { 0 , 1 , 2 , -1 , 5 , 6 , -1 , -1 , 7 , 3 , 4 },
-
- // FL | FR | FC | LFE | BL | BR | FLofC | FRofC | BC | SL | SR
-};
-
-int ChannelLayoutToChannelCount(ChannelLayout layout) {
- DCHECK_LT(static_cast<size_t>(layout), arraysize(kLayoutToChannels));
- return kLayoutToChannels[layout];
-}
-
-int ChannelOrder(ChannelLayout layout, Channels channel) {
- DCHECK_LT(static_cast<size_t>(layout), arraysize(kChannelOrderings));
- DCHECK_LT(static_cast<size_t>(channel), arraysize(kChannelOrderings[0]));
- return kChannelOrderings[layout][channel];
-}
-
-// Converts a channel count into a channel layout.
-ChannelLayout GuessChannelLayout(int channels) {
- switch (channels) {
- case 1:
- return CHANNEL_LAYOUT_MONO;
- case 2:
- return CHANNEL_LAYOUT_STEREO;
- case 3:
- return CHANNEL_LAYOUT_SURROUND;
- case 4:
- return CHANNEL_LAYOUT_QUAD;
- case 5:
- return CHANNEL_LAYOUT_5_0;
- case 6:
- return CHANNEL_LAYOUT_5_1;
- case 7:
- return CHANNEL_LAYOUT_6_1;
- case 8:
- return CHANNEL_LAYOUT_7_1;
- default:
- DVLOG(1) << "Unsupported channel count: " << channels;
- }
- return CHANNEL_LAYOUT_UNSUPPORTED;
-}
-
-} // namespace media
diff --git a/src/media/base/channel_layout.h b/src/media/base/channel_layout.h
deleted file mode 100644
index 168cce0..0000000
--- a/src/media/base/channel_layout.h
+++ /dev/null
@@ -1,132 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef MEDIA_BASE_CHANNEL_LAYOUT_H_
-#define MEDIA_BASE_CHANNEL_LAYOUT_H_
-
-#include "media/base/media_export.h"
-
-namespace media {
-
-// Enumerates the various representations of the ordering of audio channels.
-// Logged to UMA, so never reuse a value, always add new/greater ones!
-enum ChannelLayout {
- CHANNEL_LAYOUT_NONE = 0,
- CHANNEL_LAYOUT_UNSUPPORTED = 1,
-
- // Front C
- CHANNEL_LAYOUT_MONO = 2,
-
- // Front L, Front R
- CHANNEL_LAYOUT_STEREO = 3,
-
- // Front L, Front R, Back C
- CHANNEL_LAYOUT_2_1 = 4,
-
- // Front L, Front R, Front C
- CHANNEL_LAYOUT_SURROUND = 5,
-
- // Front L, Front R, Front C, Back C
- CHANNEL_LAYOUT_4_0 = 6,
-
- // Front L, Front R, Side L, Side R
- CHANNEL_LAYOUT_2_2 = 7,
-
- // Front L, Front R, Back L, Back R
- CHANNEL_LAYOUT_QUAD = 8,
-
- // Front L, Front R, Front C, Side L, Side R
- CHANNEL_LAYOUT_5_0 = 9,
-
- // Front L, Front R, Front C, Side L, Side R, LFE
- CHANNEL_LAYOUT_5_1 = 10,
-
- // Front L, Front R, Front C, Back L, Back R
- CHANNEL_LAYOUT_5_0_BACK = 11,
-
- // Front L, Front R, Front C, Back L, Back R, LFE
- CHANNEL_LAYOUT_5_1_BACK = 12,
-
- // Front L, Front R, Front C, Side L, Side R, Back L, Back R
- CHANNEL_LAYOUT_7_0 = 13,
-
- // Front L, Front R, Front C, Side L, Side R, LFE, Back L, Back R
- CHANNEL_LAYOUT_7_1 = 14,
-
- // Front L, Front R, Front C, Side L, Side R, LFE, Front LofC, Front RofC
- CHANNEL_LAYOUT_7_1_WIDE = 15,
-
- // Stereo L, Stereo R
- CHANNEL_LAYOUT_STEREO_DOWNMIX = 16,
-
- // Stereo L, Stereo R, LFE
- CHANNEL_LAYOUT_2POINT1 = 17,
-
- // Stereo L, Stereo R, Front C, LFE
- CHANNEL_LAYOUT_3_1 = 18,
-
- // Stereo L, Stereo R, Front C, Rear C, LFE
- CHANNEL_LAYOUT_4_1 = 19,
-
- // Stereo L, Stereo R, Front C, Side L, Side R, Back C
- CHANNEL_LAYOUT_6_0 = 20,
-
- // Stereo L, Stereo R, Side L, Side R, Front LofC, Front RofC
- CHANNEL_LAYOUT_6_0_FRONT = 21,
-
- // Stereo L, Stereo R, Side L, Side R, Front C, Rear C.
- CHANNEL_LAYOUT_HEXAGONAL = 22,
-
- // Stereo L, Stereo R, Side L, Side R, Front C, Rear Center, LFE
- CHANNEL_LAYOUT_6_1 = 23,
-
- // Stereo L, Stereo R, Back L, Back R, Front C, Rear Center, LFE
- CHANNEL_LAYOUT_6_1_BACK = 24,
-
- // Stereo L, Stereo R, Side L, Side R, Front LofC, Front RofC, LFE
- CHANNEL_LAYOUT_6_1_FRONT = 25,
-
- // Front L, Front R, Front C, Side L, Side R, Front LofC, Front RofC
- CHANNEL_LAYOUT_7_0_FRONT = 26,
-
- // Front L, Front R, Front C, Back L, Back R, LFE, Front LofC, Front RofC
- CHANNEL_LAYOUT_7_1_WIDE_BACK = 27,
-
- // Front L, Front R, Front C, Side L, Side R, Rear C, Back L, Back R.
- CHANNEL_LAYOUT_OCTAGONAL = 28,
-
- // Total number of layouts.
- CHANNEL_LAYOUT_MAX // Must always be last!
-};
-
-enum Channels {
- LEFT = 0,
- RIGHT,
- CENTER,
- LFE,
- BACK_LEFT,
- BACK_RIGHT,
- LEFT_OF_CENTER,
- RIGHT_OF_CENTER,
- BACK_CENTER,
- SIDE_LEFT,
- SIDE_RIGHT,
- CHANNELS_MAX
-};
-
-// Returns the expected channel position in an interleaved stream. Values of -1
-// mean the channel at that index is not used for that layout. Values range
-// from 0 to CHANNELS_MAX - 1.
-MEDIA_EXPORT int ChannelOrder(ChannelLayout layout, Channels channel);
-
-// Returns the number of channels in a given ChannelLayout.
-MEDIA_EXPORT int ChannelLayoutToChannelCount(ChannelLayout layout);
-
-// Given the number of channels, return the best layout,
-// or return CHANNEL_LAYOUT_UNSUPPORTED if there is no good match.
-MEDIA_EXPORT ChannelLayout GuessChannelLayout(int channels);
-
-} // namespace media
-
-#endif // MEDIA_BASE_CHANNEL_LAYOUT_H_
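
A quick sketch of the three helpers declared above, using the 5.1 example from the channel_layout.cc comment:

    // Six channels are guessed as 5.1. In FFmpeg's 5.1 order (L, R, C, LFE,
    // LS, RS) the left side-surround channel sits at index 4.
    media::ChannelLayout layout = media::GuessChannelLayout(6);  // CHANNEL_LAYOUT_5_1
    int channels = media::ChannelLayoutToChannelCount(layout);   // 6
    int side_left_index = media::ChannelOrder(layout, media::SIDE_LEFT);  // 4
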
diff --git a/src/media/base/channel_mixer.cc b/src/media/base/channel_mixer.cc
deleted file mode 100644
index fa4cbb6..0000000
--- a/src/media/base/channel_mixer.cc
+++ /dev/null
@@ -1,307 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// MSVC++ requires this to be set before any other includes to get M_SQRT1_2.
-#define _USE_MATH_DEFINES
-
-#include "media/base/channel_mixer.h"
-
-#include <algorithm>
-#include <cmath>
-
-#include "base/logging.h"
-#include "media/base/audio_bus.h"
-#include "media/base/vector_math.h"
-
-namespace media {
-
-// Default scale factor for mixing two channels together. We use a different
-// value for stereo -> mono and mono -> stereo mixes.
-static const float kEqualPowerScale = static_cast<float>(M_SQRT1_2);
-
-static int ValidateLayout(ChannelLayout layout) {
- CHECK_NE(layout, CHANNEL_LAYOUT_NONE);
- CHECK_NE(layout, CHANNEL_LAYOUT_MAX);
-
- // TODO(dalecurtis, crogers): We will eventually handle unsupported layouts by
- // simply copying the input channels to the output channels, similar to if the
- // user requests identical input and output layouts today.
- CHECK_NE(layout, CHANNEL_LAYOUT_UNSUPPORTED);
-
- // Verify there's at least one channel. Should always be true here by virtue
-  // of not being one of the invalid layouts, but let's double-check to be sure.
- int channel_count = ChannelLayoutToChannelCount(layout);
- DCHECK_GT(channel_count, 0);
-
- // If we have more than one channel, verify a symmetric layout for sanity.
- // The unit test will verify all possible layouts, so this can be a DCHECK.
- // Symmetry allows simplifying the matrix building code by allowing us to
- // assume that if one channel of a pair exists, the other will too.
- if (channel_count > 1) {
- DCHECK((ChannelOrder(layout, LEFT) >= 0 &&
- ChannelOrder(layout, RIGHT) >= 0) ||
- (ChannelOrder(layout, SIDE_LEFT) >= 0 &&
- ChannelOrder(layout, SIDE_RIGHT) >= 0) ||
- (ChannelOrder(layout, BACK_LEFT) >= 0 &&
- ChannelOrder(layout, BACK_RIGHT) >= 0) ||
- (ChannelOrder(layout, LEFT_OF_CENTER) >= 0 &&
- ChannelOrder(layout, RIGHT_OF_CENTER) >= 0))
- << "Non-symmetric channel layout encountered.";
- } else {
- DCHECK_EQ(layout, CHANNEL_LAYOUT_MONO);
- }
-
- return channel_count;
-}
-
-ChannelMixer::ChannelMixer(ChannelLayout input, ChannelLayout output)
- : input_layout_(input),
- output_layout_(output),
- remapping_(false) {
- // Stereo down mix should never be the output layout.
- CHECK_NE(output_layout_, CHANNEL_LAYOUT_STEREO_DOWNMIX);
-
- int input_channels = ValidateLayout(input_layout_);
- int output_channels = ValidateLayout(output_layout_);
-
- // Size out the initial matrix.
- matrix_.reserve(output_channels);
- for (int output_ch = 0; output_ch < output_channels; ++output_ch)
- matrix_.push_back(std::vector<float>(input_channels, 0));
-
- // Route matching channels and figure out which ones aren't accounted for.
- for (Channels ch = LEFT; ch < CHANNELS_MAX;
- ch = static_cast<Channels>(ch + 1)) {
- int input_ch_index = ChannelOrder(input_layout_, ch);
- int output_ch_index = ChannelOrder(output_layout_, ch);
-
- if (input_ch_index < 0)
- continue;
-
- if (output_ch_index < 0) {
- unaccounted_inputs_.push_back(ch);
- continue;
- }
-
- DCHECK_LT(static_cast<size_t>(output_ch_index), matrix_.size());
- DCHECK_LT(static_cast<size_t>(input_ch_index),
- matrix_[output_ch_index].size());
- matrix_[output_ch_index][input_ch_index] = 1;
- }
-
- // If all input channels are accounted for, there's nothing left to do.
- if (unaccounted_inputs_.empty()) {
- // Since all output channels map directly to inputs we can optimize.
- remapping_ = true;
- return;
- }
-
- // Mix front LR into center.
- if (IsUnaccounted(LEFT)) {
- // When down mixing to mono from stereo, we need to be careful of full scale
- // stereo mixes. Scaling by 1 / sqrt(2) here will likely lead to clipping
- // so we use 1 / 2 instead.
- float scale = (output == CHANNEL_LAYOUT_MONO && input_channels == 2) ?
- 0.5 : kEqualPowerScale;
- Mix(LEFT, CENTER, scale);
- Mix(RIGHT, CENTER, scale);
- }
-
- // Mix center into front LR.
- if (IsUnaccounted(CENTER)) {
- // When up mixing from mono, just do a copy to front LR.
- float scale = (input == CHANNEL_LAYOUT_MONO) ? 1 : kEqualPowerScale;
- MixWithoutAccounting(CENTER, LEFT, scale);
- Mix(CENTER, RIGHT, scale);
- }
-
- // Mix back LR into: side LR || back center || front LR || front center.
- if (IsUnaccounted(BACK_LEFT)) {
- if (HasOutputChannel(SIDE_LEFT)) {
-      // If the input also has side LR, mix back LR into side LR at equal
-      // power; otherwise (the output has side LR but the input does not)
-      // copy back LR to side LR unscaled.
- float scale = HasInputChannel(SIDE_LEFT) ? kEqualPowerScale : 1;
- Mix(BACK_LEFT, SIDE_LEFT, scale);
- Mix(BACK_RIGHT, SIDE_RIGHT, scale);
- } else if (HasOutputChannel(BACK_CENTER)) {
- // Mix back LR into back center.
- Mix(BACK_LEFT, BACK_CENTER, kEqualPowerScale);
- Mix(BACK_RIGHT, BACK_CENTER, kEqualPowerScale);
- } else if (output > CHANNEL_LAYOUT_MONO) {
- // Mix back LR into front LR.
- Mix(BACK_LEFT, LEFT, kEqualPowerScale);
- Mix(BACK_RIGHT, RIGHT, kEqualPowerScale);
- } else {
- // Mix back LR into front center.
- Mix(BACK_LEFT, CENTER, kEqualPowerScale);
- Mix(BACK_RIGHT, CENTER, kEqualPowerScale);
- }
- }
-
- // Mix side LR into: back LR || back center || front LR || front center.
- if (IsUnaccounted(SIDE_LEFT)) {
- if (HasOutputChannel(BACK_LEFT)) {
-      // If the input also has back LR, mix side LR into back LR at equal
-      // power; otherwise (the output has back LR but the input does not)
-      // copy side LR to back LR unscaled.
- float scale = HasInputChannel(BACK_LEFT) ? kEqualPowerScale : 1;
- Mix(SIDE_LEFT, BACK_LEFT, scale);
- Mix(SIDE_RIGHT, BACK_RIGHT, scale);
- } else if (HasOutputChannel(BACK_CENTER)) {
- // Mix side LR into back center.
- Mix(SIDE_LEFT, BACK_CENTER, kEqualPowerScale);
- Mix(SIDE_RIGHT, BACK_CENTER, kEqualPowerScale);
- } else if (output > CHANNEL_LAYOUT_MONO) {
- // Mix side LR into front LR.
- Mix(SIDE_LEFT, LEFT, kEqualPowerScale);
- Mix(SIDE_RIGHT, RIGHT, kEqualPowerScale);
- } else {
- // Mix side LR into front center.
- Mix(SIDE_LEFT, CENTER, kEqualPowerScale);
- Mix(SIDE_RIGHT, CENTER, kEqualPowerScale);
- }
- }
-
- // Mix back center into: back LR || side LR || front LR || front center.
- if (IsUnaccounted(BACK_CENTER)) {
- if (HasOutputChannel(BACK_LEFT)) {
- // Mix back center into back LR.
- MixWithoutAccounting(BACK_CENTER, BACK_LEFT, kEqualPowerScale);
- Mix(BACK_CENTER, BACK_RIGHT, kEqualPowerScale);
- } else if (HasOutputChannel(SIDE_LEFT)) {
- // Mix back center into side LR.
- MixWithoutAccounting(BACK_CENTER, SIDE_LEFT, kEqualPowerScale);
- Mix(BACK_CENTER, SIDE_RIGHT, kEqualPowerScale);
- } else if (output > CHANNEL_LAYOUT_MONO) {
- // Mix back center into front LR.
- // TODO(dalecurtis): Not sure about these values?
- MixWithoutAccounting(BACK_CENTER, LEFT, kEqualPowerScale);
- Mix(BACK_CENTER, RIGHT, kEqualPowerScale);
- } else {
- // Mix back center into front center.
- // TODO(dalecurtis): Not sure about these values?
- Mix(BACK_CENTER, CENTER, kEqualPowerScale);
- }
- }
-
- // Mix LR of center into: front center || front LR.
- if (IsUnaccounted(LEFT_OF_CENTER)) {
- if (HasOutputChannel(LEFT)) {
- // Mix LR of center into front LR.
- Mix(LEFT_OF_CENTER, LEFT, kEqualPowerScale);
- Mix(RIGHT_OF_CENTER, RIGHT, kEqualPowerScale);
- } else {
- // Mix LR of center into front center.
- Mix(LEFT_OF_CENTER, CENTER, kEqualPowerScale);
- Mix(RIGHT_OF_CENTER, CENTER, kEqualPowerScale);
- }
- }
-
- // Mix LFE into: front LR || front center.
- if (IsUnaccounted(LFE)) {
- if (!HasOutputChannel(CENTER)) {
- // Mix LFE into front LR.
- MixWithoutAccounting(LFE, LEFT, kEqualPowerScale);
- Mix(LFE, RIGHT, kEqualPowerScale);
- } else {
- // Mix LFE into front center.
- Mix(LFE, CENTER, kEqualPowerScale);
- }
- }
-
- // All channels should now be accounted for.
- DCHECK(unaccounted_inputs_.empty());
-
- // See if the output |matrix_| is simply a remapping matrix. If each input
- // channel maps to a single output channel we can simply remap. Doing this
- // programmatically is less fragile than logic checks on channel mappings.
- for (int output_ch = 0; output_ch < output_channels; ++output_ch) {
- int input_mappings = 0;
- for (int input_ch = 0; input_ch < input_channels; ++input_ch) {
- // We can only remap if each row contains a single scale of 1. I.e., each
- // output channel is mapped from a single unscaled input channel.
- if (matrix_[output_ch][input_ch] != 1 || ++input_mappings > 1)
- return;
- }
- }
-
- // If we've gotten here, |matrix_| is simply a remapping.
- remapping_ = true;
-}
-
-ChannelMixer::~ChannelMixer() {}
-
-void ChannelMixer::Transform(const AudioBus* input, AudioBus* output) {
- CHECK_EQ(matrix_.size(), static_cast<size_t>(output->channels()));
- CHECK_EQ(matrix_[0].size(), static_cast<size_t>(input->channels()));
- CHECK_EQ(input->frames(), output->frames());
-
- // Zero initialize |output| so we're accumulating from zero.
- output->Zero();
-
- // If we're just remapping we can simply copy the correct input to output.
- if (remapping_) {
- for (int output_ch = 0; output_ch < output->channels(); ++output_ch) {
- for (int input_ch = 0; input_ch < input->channels(); ++input_ch) {
- float scale = matrix_[output_ch][input_ch];
- if (scale > 0) {
- DCHECK_EQ(scale, 1.0f);
- memcpy(output->channel(output_ch), input->channel(input_ch),
- sizeof(*output->channel(output_ch)) * output->frames());
- break;
- }
- }
- }
- return;
- }
-
- for (int output_ch = 0; output_ch < output->channels(); ++output_ch) {
- for (int input_ch = 0; input_ch < input->channels(); ++input_ch) {
- float scale = matrix_[output_ch][input_ch];
- // Scale should always be positive. Don't bother scaling by zero.
- DCHECK_GE(scale, 0);
- if (scale > 0) {
- vector_math::FMAC(input->channel(input_ch), scale, output->frames(),
- output->channel(output_ch));
- }
- }
- }
-}
-
-void ChannelMixer::AccountFor(Channels ch) {
- unaccounted_inputs_.erase(std::find(
- unaccounted_inputs_.begin(), unaccounted_inputs_.end(), ch));
-}
-
-bool ChannelMixer::IsUnaccounted(Channels ch) {
- return std::find(unaccounted_inputs_.begin(), unaccounted_inputs_.end(),
- ch) != unaccounted_inputs_.end();
-}
-
-bool ChannelMixer::HasInputChannel(Channels ch) {
- return ChannelOrder(input_layout_, ch) >= 0;
-}
-
-bool ChannelMixer::HasOutputChannel(Channels ch) {
- return ChannelOrder(output_layout_, ch) >= 0;
-}
-
-void ChannelMixer::Mix(Channels input_ch, Channels output_ch, float scale) {
- MixWithoutAccounting(input_ch, output_ch, scale);
- AccountFor(input_ch);
-}
-
-void ChannelMixer::MixWithoutAccounting(Channels input_ch, Channels output_ch,
- float scale) {
- int input_ch_index = ChannelOrder(input_layout_, input_ch);
- int output_ch_index = ChannelOrder(output_layout_, output_ch);
-
- DCHECK(IsUnaccounted(input_ch));
- DCHECK_GE(input_ch_index, 0);
- DCHECK_GE(output_ch_index, 0);
-
- DCHECK_EQ(matrix_[output_ch_index][input_ch_index], 0);
- matrix_[output_ch_index][input_ch_index] = scale;
-}
-
-} // namespace media
diff --git a/src/media/base/channel_mixer.h b/src/media/base/channel_mixer.h
deleted file mode 100644
index 0fdcc18..0000000
--- a/src/media/base/channel_mixer.h
+++ /dev/null
@@ -1,68 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef MEDIA_BASE_CHANNEL_MIXER_H_
-#define MEDIA_BASE_CHANNEL_MIXER_H_
-
-#include <vector>
-
-#include "base/basictypes.h"
-#include "media/base/channel_layout.h"
-#include "media/base/media_export.h"
-
-namespace media {
-
-class AudioBus;
-
-// ChannelMixer is for converting audio between channel layouts. The conversion
-// matrix is built upon construction and used during each Transform() call. The
-// algorithm works by generating a conversion matrix mapping each output channel
-// to list of input channels. The transform renders all of the output channels,
-// with each output channel rendered according to a weighted sum of the relevant
-// input channels as defined in the matrix.
-class MEDIA_EXPORT ChannelMixer {
- public:
- ChannelMixer(ChannelLayout input, ChannelLayout output);
- ~ChannelMixer();
-
- // Transforms all channels from |input| into |output| channels.
- void Transform(const AudioBus* input, AudioBus* output);
-
- private:
- // Constructor helper methods for managing unaccounted input channels.
- void AccountFor(Channels ch);
- bool IsUnaccounted(Channels ch);
-
- // Helper methods for checking if |ch| exists in either |input_layout_| or
- // |output_layout_| respectively.
- bool HasInputChannel(Channels ch);
- bool HasOutputChannel(Channels ch);
-
- // Constructor helper methods for updating |matrix_| with the proper value for
- // mixing |input_ch| into |output_ch|. MixWithoutAccounting() does not remove
- // the channel from |unaccounted_inputs_|.
- void Mix(Channels input_ch, Channels output_ch, float scale);
- void MixWithoutAccounting(Channels input_ch, Channels output_ch, float scale);
-
- // Input and output channel layout provided during construction.
- ChannelLayout input_layout_;
- ChannelLayout output_layout_;
-
- // Helper variable for tracking which inputs are currently unaccounted, should
- // be empty after construction completes.
- std::vector<Channels> unaccounted_inputs_;
-
- // 2D matrix of output channels to input channels.
- std::vector< std::vector<float> > matrix_;
-
- // Optimization case for when we can simply remap the input channels to output
- // channels and don't need to do a multiply-accumulate loop over |matrix_|.
- bool remapping_;
-
- DISALLOW_COPY_AND_ASSIGN(ChannelMixer);
-};
-
-} // namespace media
-
-#endif // MEDIA_BASE_CHANNEL_MIXER_H_
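
What the general (non-remapping) Transform() path boils down to, sketched in plain C++ with std::vector stand-ins for AudioBus; matrix dimensions follow the header comment, one row per output channel and one column per input channel:

    #include <vector>

    // output[o][f] = sum over inputs i of matrix[o][i] * input[i][f]
    void MixFrames(const std::vector<std::vector<float> >& matrix,
                   const std::vector<std::vector<float> >& input,
                   std::vector<std::vector<float> >* output) {
      const size_t frames = input[0].size();
      for (size_t o = 0; o < matrix.size(); ++o) {
        (*output)[o].assign(frames, 0.0f);
        for (size_t i = 0; i < matrix[o].size(); ++i) {
          const float scale = matrix[o][i];
          if (scale == 0.0f)
            continue;  // This input does not contribute to this output.
          for (size_t f = 0; f < frames; ++f)
            (*output)[o][f] += scale * input[i][f];  // the FMAC accumulation
        }
      }
    }
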
diff --git a/src/media/base/channel_mixer_unittest.cc b/src/media/base/channel_mixer_unittest.cc
deleted file mode 100644
index a71f86b..0000000
--- a/src/media/base/channel_mixer_unittest.cc
+++ /dev/null
@@ -1,123 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// MSVC++ requires this to be set before any other includes to get M_SQRT1_2.
-#define _USE_MATH_DEFINES
-
-#include <cmath>
-
-#include "base/stringprintf.h"
-#include "media/base/audio_bus.h"
-#include "media/base/channel_mixer.h"
-#include "testing/gtest/include/gtest/gtest.h"
-
-namespace media {
-
-// Number of frames to test with.
-enum { kFrames = 16 };
-
-// Test all possible layout conversions can be constructed and mixed.
-TEST(ChannelMixerTest, ConstructAllPossibleLayouts) {
- for (ChannelLayout input_layout = CHANNEL_LAYOUT_MONO;
- input_layout < CHANNEL_LAYOUT_MAX;
- input_layout = static_cast<ChannelLayout>(input_layout + 1)) {
- for (ChannelLayout output_layout = CHANNEL_LAYOUT_MONO;
- output_layout < CHANNEL_LAYOUT_STEREO_DOWNMIX;
- output_layout = static_cast<ChannelLayout>(output_layout + 1)) {
- SCOPED_TRACE(base::StringPrintf(
- "Input Layout: %d, Output Layout: %d", input_layout, output_layout));
- ChannelMixer mixer(input_layout, output_layout);
- scoped_ptr<AudioBus> input_bus = AudioBus::Create(
- ChannelLayoutToChannelCount(input_layout), kFrames);
- scoped_ptr<AudioBus> output_bus = AudioBus::Create(
- ChannelLayoutToChannelCount(output_layout), kFrames);
- for (int ch = 0; ch < input_bus->channels(); ++ch)
- std::fill(input_bus->channel(ch), input_bus->channel(ch) + kFrames, 1);
-
- mixer.Transform(input_bus.get(), output_bus.get());
- }
- }
-}
-
-struct ChannelMixerTestData {
- ChannelMixerTestData(ChannelLayout input_layout, ChannelLayout output_layout,
- float* channel_values, int num_channel_values,
- float scale)
- : input_layout(input_layout),
- output_layout(output_layout),
- channel_values(channel_values),
- num_channel_values(num_channel_values),
- scale(scale) {
- }
-
- std::string DebugString() const {
- return base::StringPrintf(
- "Input Layout: %d, Output Layout %d, Scale: %f", input_layout,
- output_layout, scale);
- }
-
- ChannelLayout input_layout;
- ChannelLayout output_layout;
- float* channel_values;
- int num_channel_values;
- float scale;
-};
-
-std::ostream& operator<<(std::ostream& os, const ChannelMixerTestData& data) {
- return os << data.DebugString();
-}
-
-class ChannelMixerTest : public testing::TestWithParam<ChannelMixerTestData> {};
-
-// Verify channels are mixed and scaled correctly. The test only works if all
-// output channels have the same value.
-TEST_P(ChannelMixerTest, Mixing) {
- ChannelLayout input_layout = GetParam().input_layout;
- ChannelLayout output_layout = GetParam().output_layout;
-
- ChannelMixer mixer(input_layout, output_layout);
- scoped_ptr<AudioBus> input_bus = AudioBus::Create(
- ChannelLayoutToChannelCount(input_layout), kFrames);
- scoped_ptr<AudioBus> output_bus = AudioBus::Create(
- ChannelLayoutToChannelCount(output_layout), kFrames);
-
- const float* channel_values = GetParam().channel_values;
- ASSERT_EQ(input_bus->channels(), GetParam().num_channel_values);
-
- float expected_value = 0;
- float scale = GetParam().scale;
- for (int ch = 0; ch < input_bus->channels(); ++ch) {
- std::fill(input_bus->channel(ch), input_bus->channel(ch) + kFrames,
- channel_values[ch]);
- expected_value += channel_values[ch] * scale;
- }
-
- mixer.Transform(input_bus.get(), output_bus.get());
-
- for (int ch = 0; ch < output_bus->channels(); ++ch) {
- for (int frame = 0; frame < output_bus->frames(); ++frame) {
- ASSERT_FLOAT_EQ(output_bus->channel(ch)[frame], expected_value);
- }
- }
-}
-
-static float kStereoToMonoValues[] = { 0.5f, 0.75f };
-static float kMonoToStereoValues[] = { 0.5f };
-// Zero the center channel since it will be mixed at scale 1 vs M_SQRT1_2.
-static float kFiveOneToMonoValues[] = { 0.1f, 0.2f, 0.0f, 0.4f, 0.5f, 0.6f };
-
-// Run through basic sanity tests for some common conversions.
-INSTANTIATE_TEST_CASE_P(ChannelMixerTest, ChannelMixerTest, testing::Values(
- ChannelMixerTestData(CHANNEL_LAYOUT_STEREO, CHANNEL_LAYOUT_MONO,
- kStereoToMonoValues, arraysize(kStereoToMonoValues),
- 0.5f),
- ChannelMixerTestData(CHANNEL_LAYOUT_MONO, CHANNEL_LAYOUT_STEREO,
- kMonoToStereoValues, arraysize(kMonoToStereoValues),
- 1.0f),
- ChannelMixerTestData(CHANNEL_LAYOUT_5_1, CHANNEL_LAYOUT_MONO,
- kFiveOneToMonoValues, arraysize(kFiveOneToMonoValues),
- static_cast<float>(M_SQRT1_2))
-));
-
-} // namespace media
diff --git a/src/media/base/clock.cc b/src/media/base/clock.cc
deleted file mode 100644
index 2432f91..0000000
--- a/src/media/base/clock.cc
+++ /dev/null
@@ -1,145 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "media/base/clock.h"
-
-#include <algorithm>
-
-#include "base/logging.h"
-#include "media/base/buffers.h"
-
-namespace media {
-
-Clock::Clock(TimeProvider* time_provider)
- : time_provider_(time_provider) {
- Reset();
-}
-
-Clock::~Clock() {}
-
-bool Clock::IsPlaying() const {
- return playing_;
-}
-
-base::TimeDelta Clock::Play() {
- DCHECK(!playing_);
- UpdateReferencePoints();
- playing_ = true;
- return media_time_;
-}
-
-base::TimeDelta Clock::Pause() {
- DCHECK(playing_);
- UpdateReferencePoints();
- playing_ = false;
- return media_time_;
-}
-
-void Clock::SetPlaybackRate(float playback_rate) {
- UpdateReferencePoints();
- playback_rate_ = playback_rate;
-}
-
-void Clock::SetTime(base::TimeDelta current_time, base::TimeDelta max_time) {
- DCHECK(current_time <= max_time);
- DCHECK(current_time != kNoTimestamp());
-
- UpdateReferencePoints(current_time);
- max_time_ = ClampToValidTimeRange(max_time);
- underflow_ = false;
-}
-
-base::TimeDelta Clock::Elapsed() {
- if (duration_ == kNoTimestamp())
- return base::TimeDelta();
-
- // The clock is not advancing, so return the last recorded time.
- if (!playing_ || underflow_)
- return media_time_;
-
- base::TimeDelta elapsed = EstimatedElapsedTime();
- if (max_time_ != kNoTimestamp() && elapsed > max_time_) {
- UpdateReferencePoints(max_time_);
- underflow_ = true;
- elapsed = max_time_;
- }
-
- return elapsed;
-}
-
-void Clock::SetMaxTime(base::TimeDelta max_time) {
- DCHECK(max_time != kNoTimestamp());
-
- UpdateReferencePoints();
- max_time_ = ClampToValidTimeRange(max_time);
-
- underflow_ = media_time_ > max_time_;
- if (underflow_)
- media_time_ = max_time_;
-}
-
-void Clock::SetDuration(base::TimeDelta duration) {
- DCHECK(duration > base::TimeDelta());
- duration_ = duration;
-
- media_time_ = ClampToValidTimeRange(media_time_);
- if (max_time_ != kNoTimestamp())
- max_time_ = ClampToValidTimeRange(max_time_);
-}
-
-base::TimeDelta Clock::ElapsedViaProvidedTime(const base::Time& time) const {
- // TODO(scherkus): floating point badness scaling time by playback rate.
- int64 now_us = (time - reference_).InMicroseconds();
- now_us = static_cast<int64>(now_us * playback_rate_);
- return media_time_ + base::TimeDelta::FromMicroseconds(now_us);
-}
-
-base::Time Clock::GetTimeFromProvider() const {
- if (time_provider_)
- return time_provider_();
- return base::Time();
-}
-
-base::TimeDelta Clock::ClampToValidTimeRange(base::TimeDelta time) const {
- if (duration_ == kNoTimestamp())
- return base::TimeDelta();
- return std::max(std::min(time, duration_), base::TimeDelta());
-}
-
-void Clock::EndOfStream() {
- Pause();
- SetTime(Duration(), Duration());
-}
-
-base::TimeDelta Clock::Duration() const {
- if (duration_ == kNoTimestamp())
- return base::TimeDelta();
- return duration_;
-}
-
-void Clock::UpdateReferencePoints() {
- UpdateReferencePoints(Elapsed());
-}
-
-void Clock::UpdateReferencePoints(base::TimeDelta current_time) {
- media_time_ = ClampToValidTimeRange(current_time);
- reference_ = GetTimeFromProvider();
-}
-
-base::TimeDelta Clock::EstimatedElapsedTime() {
- return ClampToValidTimeRange(
- ElapsedViaProvidedTime(GetTimeFromProvider()));
-}
-
-void Clock::Reset() {
- playing_ = false;
- playback_rate_ = 1.0f;
- max_time_ = kNoTimestamp();
- duration_ = kNoTimestamp();
- media_time_ = base::TimeDelta();
- reference_ = base::Time();
- underflow_ = false;
-}
-
-} // namespace media
diff --git a/src/media/base/clock.h b/src/media/base/clock.h
deleted file mode 100644
index 5b2a90c..0000000
--- a/src/media/base/clock.h
+++ /dev/null
@@ -1,130 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef MEDIA_BASE_CLOCK_H_
-#define MEDIA_BASE_CLOCK_H_
-
-#include "base/basictypes.h"
-#include "base/time.h"
-#include "media/base/media_export.h"
-
-namespace media {
-
-// A clock represents a single source of time to allow audio and video streams
-// to synchronize with each other. Clock essentially tracks the media time with
-// respect to some other source of time, whether that is the system clock or
-// updates via SetTime(). Clock uses linear interpolation to calculate the
-// current media time since the last time SetTime() was called.
-//
-// Clocks start off paused with a playback rate of 1.0f and a media time of 0.
-//
-// Clock is not thread-safe and must be externally locked.
-//
-// TODO(scherkus): Clock will some day be responsible for executing callbacks
-// given a media time. This will be used primarily by video renderers. For now
-// we'll keep using a poll-and-sleep solution.
-class MEDIA_EXPORT Clock {
- public:
- // Type for a static function pointer that acts as a time source.
- typedef base::Time(TimeProvider)();
-
- explicit Clock(TimeProvider* time_provider);
- ~Clock();
-
- // Returns true if the clock is running.
- bool IsPlaying() const;
-
- // Starts the clock and returns the current media time, which will increase
- // with respect to the current playback rate.
- base::TimeDelta Play();
-
- // Stops the clock and returns the current media time, which will remain
- // constant until Play() is called.
- base::TimeDelta Pause();
-
- // Sets a new playback rate. The rate at which the media time will increase
- // will now change.
- void SetPlaybackRate(float playback_rate);
-
- // Forcefully sets the media time to |current_time|. The second parameter is
- // the |max_time| that the clock should progress after a call to Play(). This
- // value is often the time of the end of the last frame buffered and decoded.
- //
- // These values are clamped to the duration of the video, which is initially
- // set to 0 (before SetDuration() is called).
- void SetTime(base::TimeDelta current_time, base::TimeDelta max_time);
-
- // Sets the |max_time| to be returned by a call to Elapsed().
- void SetMaxTime(base::TimeDelta max_time);
-
- // Returns the current elapsed media time. Returns 0 if SetDuration() has
- // never been called.
- base::TimeDelta Elapsed();
-
- // Sets the duration of the video. Clock expects the duration will be set
- // exactly once.
- void SetDuration(base::TimeDelta duration);
-
- // Resets clock to an uninitialized state.
- void Reset();
-
- // Notifies the clock that the end of stream has been reached. The clock state
- // is updated accordingly.
- void EndOfStream();
-
- // Returns the duration of the clock, or 0 if not set.
- base::TimeDelta Duration() const;
-
- private:
- // Updates the reference points based on the current calculated time.
- void UpdateReferencePoints();
-
- // Updates the reference points based on the given |current_time|.
- void UpdateReferencePoints(base::TimeDelta current_time);
-
- // Returns the time elapsed based on the current reference points, ignoring
- // the |max_time_| cap.
- base::TimeDelta EstimatedElapsedTime();
-
- // Returns the current media time treating the given time as the latest
- // value as returned by |time_provider_|.
- base::TimeDelta ElapsedViaProvidedTime(const base::Time& time) const;
-
- base::Time GetTimeFromProvider() const;
-
- base::TimeDelta ClampToValidTimeRange(base::TimeDelta time) const;
-
- // Function returning current time in base::Time units.
- TimeProvider* time_provider_;
-
- // Whether the clock is running.
- bool playing_;
-
- // Whether the clock is stalled because it has reached the |max_time_|
- // allowed.
- bool underflow_;
-
- // The system clock time when this clock last started playing or had its
- // time set via SetTime().
- base::Time reference_;
-
- // Current accumulated amount of media time. The remaining portion must be
- // calculated by comparing the system time to the reference time.
- base::TimeDelta media_time_;
-
- // Current playback rate.
- float playback_rate_;
-
- // The maximum time that can be returned by calls to Elapsed().
- base::TimeDelta max_time_;
-
- // Duration of the media.
- base::TimeDelta duration_;
-
- DISALLOW_COPY_AND_ASSIGN(Clock);
-};
-
-} // namespace media
-
-#endif // MEDIA_BASE_CLOCK_H_
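For reference, a minimal sketch of how callers drove the media::Clock API deleted above; it only uses calls declared in the removed header. base::Time::Now serves here as a stand-in TimeProvider (the unit tests below substitute base::MockTimeProvider::StaticNow), and the caller is responsible for the external locking the header requires.

#include "base/time.h"
#include "media/base/clock.h"

void ExampleClockUsage() {
  media::Clock clock(&base::Time::Now);                  // TimeProvider*
  clock.SetDuration(base::TimeDelta::FromSeconds(120));  // clamp range
  clock.SetPlaybackRate(2.0f);                           // media time at 2x
  base::TimeDelta start = clock.Play();                  // starts from 0
  base::TimeDelta now = clock.Elapsed();                 // interpolated media time
  clock.Pause();                                         // Elapsed() now freezes
}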
diff --git a/src/media/base/clock_unittest.cc b/src/media/base/clock_unittest.cc
deleted file mode 100644
index 6773a5b..0000000
--- a/src/media/base/clock_unittest.cc
+++ /dev/null
@@ -1,256 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/logging.h"
-#include "base/test/mock_time_provider.h"
-#include "media/base/clock.h"
-#include "testing/gmock/include/gmock/gmock.h"
-
-using ::testing::InSequence;
-using ::testing::Return;
-using ::testing::StrictMock;
-
-namespace base {
-
-// Provide a stream output operator so we can use EXPECT_EQ(...) with TimeDelta.
-//
-// TODO(scherkus): move this into the testing package.
-static std::ostream& operator<<(std::ostream& stream, const TimeDelta& time) {
- return (stream << time.ToInternalValue());
-}
-
-} // namespace base
-
-namespace media {
-
-static const int kDurationInSeconds = 120;
-
-class ClockTest : public ::testing::Test {
- public:
- ClockTest()
- : clock_(&base::MockTimeProvider::StaticNow) {
- SetDuration();
- EXPECT_CALL(mock_time_, Now())
- .WillRepeatedly(Return(base::Time::UnixEpoch()));
- }
-
- protected:
- void SetDuration() {
- const base::TimeDelta kDuration =
- base::TimeDelta::FromSeconds(kDurationInSeconds);
- clock_.SetDuration(kDuration);
- EXPECT_EQ(kDuration, clock_.Duration());
- }
-
- void AdvanceSystemTime(base::TimeDelta delta) {
- time_elapsed_ += delta;
- EXPECT_CALL(mock_time_, Now())
- .WillRepeatedly(Return(base::Time::UnixEpoch() + time_elapsed_));
- }
-
- Clock clock_;
- StrictMock<base::MockTimeProvider> mock_time_;
- base::TimeDelta time_elapsed_;
-};
-
-TEST_F(ClockTest, Created) {
- const base::TimeDelta kExpected = base::TimeDelta::FromSeconds(0);
- EXPECT_EQ(kExpected, clock_.Elapsed());
-}
-
-TEST_F(ClockTest, Play_NormalSpeed) {
- const base::TimeDelta kZero;
- const base::TimeDelta kTimeToAdvance = base::TimeDelta::FromSeconds(2);
-
- EXPECT_EQ(kZero, clock_.Play());
- AdvanceSystemTime(kTimeToAdvance);
- EXPECT_EQ(kTimeToAdvance, clock_.Elapsed());
-}
-
-TEST_F(ClockTest, Play_DoubleSpeed) {
- const base::TimeDelta kZero;
- const base::TimeDelta kTimeToAdvance = base::TimeDelta::FromSeconds(5);
-
- clock_.SetPlaybackRate(2.0f);
- EXPECT_EQ(kZero, clock_.Play());
- AdvanceSystemTime(kTimeToAdvance);
- EXPECT_EQ(2 * kTimeToAdvance, clock_.Elapsed());
-}
-
-TEST_F(ClockTest, Play_HalfSpeed) {
- const base::TimeDelta kZero;
- const base::TimeDelta kTimeToAdvance = base::TimeDelta::FromSeconds(4);
-
- clock_.SetPlaybackRate(0.5f);
- EXPECT_EQ(kZero, clock_.Play());
- AdvanceSystemTime(kTimeToAdvance);
- EXPECT_EQ(kTimeToAdvance / 2, clock_.Elapsed());
-}
-
-TEST_F(ClockTest, Play_ZeroSpeed) {
- // We'll play for 2 seconds at normal speed, 4 seconds at zero speed, and 8
- // seconds at normal speed.
- const base::TimeDelta kZero;
- const base::TimeDelta kPlayDuration1 = base::TimeDelta::FromSeconds(2);
- const base::TimeDelta kPlayDuration2 = base::TimeDelta::FromSeconds(4);
- const base::TimeDelta kPlayDuration3 = base::TimeDelta::FromSeconds(8);
- const base::TimeDelta kExpected = kPlayDuration1 + kPlayDuration3;
-
- EXPECT_EQ(kZero, clock_.Play());
-
- AdvanceSystemTime(kPlayDuration1);
- clock_.SetPlaybackRate(0.0f);
- AdvanceSystemTime(kPlayDuration2);
- clock_.SetPlaybackRate(1.0f);
- AdvanceSystemTime(kPlayDuration3);
-
- EXPECT_EQ(kExpected, clock_.Elapsed());
-}
-
-TEST_F(ClockTest, Play_MultiSpeed) {
- // We'll play for 2 seconds at half speed, 4 seconds at normal speed, and 8
- // seconds at double speed.
- const base::TimeDelta kZero;
- const base::TimeDelta kPlayDuration1 = base::TimeDelta::FromSeconds(2);
- const base::TimeDelta kPlayDuration2 = base::TimeDelta::FromSeconds(4);
- const base::TimeDelta kPlayDuration3 = base::TimeDelta::FromSeconds(8);
- const base::TimeDelta kExpected =
- kPlayDuration1 / 2 + kPlayDuration2 + 2 * kPlayDuration3;
-
- clock_.SetPlaybackRate(0.5f);
- EXPECT_EQ(kZero, clock_.Play());
- AdvanceSystemTime(kPlayDuration1);
-
- clock_.SetPlaybackRate(1.0f);
- AdvanceSystemTime(kPlayDuration2);
-
- clock_.SetPlaybackRate(2.0f);
- AdvanceSystemTime(kPlayDuration3);
- EXPECT_EQ(kExpected, clock_.Elapsed());
-}
-
-TEST_F(ClockTest, Pause) {
- const base::TimeDelta kZero;
- const base::TimeDelta kPlayDuration = base::TimeDelta::FromSeconds(4);
- const base::TimeDelta kPauseDuration = base::TimeDelta::FromSeconds(20);
- const base::TimeDelta kExpectedFirstPause = kPlayDuration;
- const base::TimeDelta kExpectedSecondPause = 2 * kPlayDuration;
-
- // Play for 4 seconds.
- EXPECT_EQ(kZero, clock_.Play());
- AdvanceSystemTime(kPlayDuration);
-
- // Pause for 20 seconds.
- EXPECT_EQ(kExpectedFirstPause, clock_.Pause());
- EXPECT_EQ(kExpectedFirstPause, clock_.Elapsed());
- AdvanceSystemTime(kPauseDuration);
- EXPECT_EQ(kExpectedFirstPause, clock_.Elapsed());
-
- // Play again for 4 more seconds.
- EXPECT_EQ(kExpectedFirstPause, clock_.Play());
- AdvanceSystemTime(kPlayDuration);
- EXPECT_EQ(kExpectedSecondPause, clock_.Pause());
- EXPECT_EQ(kExpectedSecondPause, clock_.Elapsed());
-}
-
-TEST_F(ClockTest, SetTime_Paused) {
- const base::TimeDelta kFirstTime = base::TimeDelta::FromSeconds(4);
- const base::TimeDelta kSecondTime = base::TimeDelta::FromSeconds(16);
-
- clock_.SetTime(kFirstTime, clock_.Duration());
- EXPECT_EQ(kFirstTime, clock_.Elapsed());
- clock_.SetTime(kSecondTime, clock_.Duration());
- EXPECT_EQ(kSecondTime, clock_.Elapsed());
-}
-
-TEST_F(ClockTest, SetTime_Playing) {
- // We'll play for 4 seconds, then set the time to 12, then play for 4 more
- // seconds.
- const base::TimeDelta kZero;
- const base::TimeDelta kPlayDuration = base::TimeDelta::FromSeconds(4);
- const base::TimeDelta kUpdatedTime = base::TimeDelta::FromSeconds(12);
- const base::TimeDelta kExpected = kUpdatedTime + kPlayDuration;
-
- EXPECT_EQ(kZero, clock_.Play());
- AdvanceSystemTime(kPlayDuration);
-
- clock_.SetTime(kUpdatedTime, clock_.Duration());
- AdvanceSystemTime(kPlayDuration);
- EXPECT_EQ(kExpected, clock_.Elapsed());
-}
-
-TEST_F(ClockTest, CapAtMediaDuration_Paused) {
- const base::TimeDelta kDuration =
- base::TimeDelta::FromSeconds(kDurationInSeconds);
- const base::TimeDelta kTimeOverDuration =
- base::TimeDelta::FromSeconds(kDurationInSeconds + 4);
-
- // Elapsed time should always be capped at the duration of the media.
- clock_.SetTime(kTimeOverDuration, kTimeOverDuration);
- EXPECT_EQ(kDuration, clock_.Elapsed());
-}
-
-TEST_F(ClockTest, CapAtMediaDuration_Playing) {
- const base::TimeDelta kZero;
- const base::TimeDelta kDuration =
- base::TimeDelta::FromSeconds(kDurationInSeconds);
- const base::TimeDelta kTimeOverDuration =
- base::TimeDelta::FromSeconds(kDurationInSeconds + 4);
-
- // Play for twice as long as the duration of the media.
- EXPECT_EQ(kZero, clock_.Play());
- AdvanceSystemTime(2 * kDuration);
- EXPECT_EQ(kDuration, clock_.Elapsed());
-
- // Manually set the time past the duration.
- clock_.SetTime(kTimeOverDuration, kTimeOverDuration);
- EXPECT_EQ(kDuration, clock_.Elapsed());
-}
-
-TEST_F(ClockTest, SetMaxTime) {
- const base::TimeDelta kZero;
- const base::TimeDelta kTimeInterval = base::TimeDelta::FromSeconds(4);
- const base::TimeDelta kMaxTime = base::TimeDelta::FromSeconds(6);
-
- EXPECT_EQ(kZero, clock_.Play());
- clock_.SetMaxTime(kMaxTime);
- AdvanceSystemTime(kTimeInterval);
- EXPECT_EQ(kTimeInterval, clock_.Elapsed());
-
- AdvanceSystemTime(kTimeInterval);
- EXPECT_EQ(kMaxTime, clock_.Elapsed());
-
- AdvanceSystemTime(kTimeInterval);
- EXPECT_EQ(kMaxTime, clock_.Elapsed());
-}
-
-TEST_F(ClockTest, SetMaxTime_MultipleTimes) {
- const base::TimeDelta kZero;
- const base::TimeDelta kTimeInterval = base::TimeDelta::FromSeconds(4);
- const base::TimeDelta kMaxTime1 = base::TimeDelta::FromSeconds(6);
- const base::TimeDelta kMaxTime2 = base::TimeDelta::FromSeconds(12);
-
- EXPECT_EQ(kZero, clock_.Play());
- clock_.SetMaxTime(clock_.Duration());
- AdvanceSystemTime(kTimeInterval);
- EXPECT_EQ(kTimeInterval, clock_.Elapsed());
-
- clock_.SetMaxTime(kMaxTime1);
- AdvanceSystemTime(kTimeInterval);
- EXPECT_EQ(kMaxTime1, clock_.Elapsed());
-
- AdvanceSystemTime(kTimeInterval);
- EXPECT_EQ(kMaxTime1, clock_.Elapsed());
-
- clock_.SetMaxTime(kMaxTime2);
- EXPECT_EQ(kMaxTime1, clock_.Elapsed());
-
- AdvanceSystemTime(kTimeInterval);
- EXPECT_EQ(kMaxTime1 + kTimeInterval, clock_.Elapsed());
-
- AdvanceSystemTime(kTimeInterval);
- EXPECT_EQ(kMaxTime2, clock_.Elapsed());
-}
-
-} // namespace media
diff --git a/src/media/base/color_space.cc b/src/media/base/color_space.cc
deleted file mode 100644
index a37c7bb..0000000
--- a/src/media/base/color_space.cc
+++ /dev/null
@@ -1,154 +0,0 @@
-// Copyright 2017 Google Inc. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "media/base/color_space.h"
-
-#include "starboard/memory.h"
-
-namespace gfx {
-
-ColorSpace::PrimaryID ColorSpace::PrimaryIDFromInt(int primary_id) {
- if (primary_id < 0 || primary_id > static_cast<int>(kPrimaryIdLast))
- return kPrimaryIdUnknown;
- if (primary_id > static_cast<int>(kPrimaryIdLastStandardValue) &&
- primary_id < 1000)
- return kPrimaryIdUnknown;
- return static_cast<PrimaryID>(primary_id);
-}
-
-ColorSpace::TransferID ColorSpace::TransferIDFromInt(int transfer_id) {
- if (transfer_id < 0 || transfer_id > static_cast<int>(kTransferIdLast))
- return kTransferIdUnknown;
- if (transfer_id > static_cast<int>(kTransferIdLastStandardValue) &&
- transfer_id < 1000)
- return kTransferIdUnknown;
- return static_cast<TransferID>(transfer_id);
-}
-
-ColorSpace::MatrixID ColorSpace::MatrixIDFromInt(int matrix_id) {
- if (matrix_id < 0 || matrix_id > static_cast<int>(kMatrixIdLast))
- return kMatrixIdUnknown;
- if (matrix_id > static_cast<int>(kMatrixIdLastStandardValue) &&
- matrix_id < 1000)
- return kMatrixIdUnknown;
- return static_cast<MatrixID>(matrix_id);
-}
-
-ColorSpace::ColorSpace()
- : primaries_(kPrimaryIdUnspecified),
- transfer_(kTransferIdUnspecified),
- matrix_(kMatrixIdUnspecified),
- range_(kRangeIdLimited) {
- SbMemorySet(custom_primary_matrix_, 0, sizeof(custom_primary_matrix_));
-}
-
-ColorSpace::ColorSpace(PrimaryID primaries,
- TransferID transfer,
- MatrixID matrix,
- RangeID range)
- : primaries_(primaries),
- transfer_(transfer),
- matrix_(matrix),
- range_(range) {
- SbMemorySet(custom_primary_matrix_, 0, sizeof(custom_primary_matrix_));
-}
-
-ColorSpace::ColorSpace(int primaries, int transfer, int matrix, RangeID range)
- : primaries_(PrimaryIDFromInt(primaries)),
- transfer_(TransferIDFromInt(transfer)),
- matrix_(MatrixIDFromInt(matrix)),
- range_(range) {
- SbMemorySet(custom_primary_matrix_, 0, sizeof(custom_primary_matrix_));
-}
-
-ColorSpace::ColorSpace(const ColorSpace& other)
- : primaries_(other.primaries_),
- transfer_(other.transfer_),
- matrix_(other.matrix_),
- range_(other.range_) {
- SbMemoryCopy(custom_primary_matrix_, other.custom_primary_matrix_,
- sizeof(custom_primary_matrix_));
-}
-
-ColorSpace::~ColorSpace() {}
-
-// Static
-ColorSpace ColorSpace::CreateXYZD50() {
- return ColorSpace(kPrimaryIdXyzD50, kTransferIdLinear, kMatrixIdRgb,
- kRangeIdFull);
-}
-
-// static
-ColorSpace ColorSpace::CreateJpeg() {
- return ColorSpace(kPrimaryIdBt709, kTransferIdIec6196621, kMatrixIdBt709,
- kRangeIdFull);
-}
-
-// static
-ColorSpace ColorSpace::CreateREC601() {
- return ColorSpace(kPrimaryIdSmpte170M, kTransferIdSmpte170M,
- kMatrixIdSmpte170M, kRangeIdLimited);
-}
-
-// static
-ColorSpace ColorSpace::CreateREC709() {
- return ColorSpace(kPrimaryIdBt709, kTransferIdBt709, kMatrixIdBt709,
- kRangeIdLimited);
-}
-
-bool ColorSpace::operator==(const ColorSpace& other) const {
- if (primaries_ != other.primaries_ || transfer_ != other.transfer_ ||
- matrix_ != other.matrix_ || range_ != other.range_)
- return false;
- if (primaries_ == kPrimaryIdCustom &&
- SbMemoryCompare(custom_primary_matrix_, other.custom_primary_matrix_,
- sizeof(custom_primary_matrix_)))
- return false;
- return true;
-}
-
-bool ColorSpace::operator!=(const ColorSpace& other) const {
- return !(*this == other);
-}
-
-bool ColorSpace::operator<(const ColorSpace& other) const {
- if (primaries_ < other.primaries_)
- return true;
- if (primaries_ > other.primaries_)
- return false;
- if (transfer_ < other.transfer_)
- return true;
- if (transfer_ > other.transfer_)
- return false;
- if (matrix_ < other.matrix_)
- return true;
- if (matrix_ > other.matrix_)
- return false;
- if (range_ < other.range_)
- return true;
- if (range_ > other.range_)
- return false;
- if (primaries_ == kPrimaryIdCustom) {
- int primary_result =
- SbMemoryCompare(custom_primary_matrix_, other.custom_primary_matrix_,
- sizeof(custom_primary_matrix_));
- if (primary_result < 0)
- return true;
- if (primary_result > 0)
- return false;
- }
- return false;
-}
-
-} // namespace gfx
diff --git a/src/media/base/color_space.h b/src/media/base/color_space.h
deleted file mode 100644
index b4086a0..0000000
--- a/src/media/base/color_space.h
+++ /dev/null
@@ -1,188 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef COBALT_MEDIA_BASE_COLOR_SPACE_H_
-#define COBALT_MEDIA_BASE_COLOR_SPACE_H_
-
-#include <stdint.h>
-
-#include "base/gtest_prod_util.h"
-#include "base/logging.h"
-#include "build/build_config.h"
-#include "media/base/gfx_export.h"
-
-namespace IPC {
-template <class P>
-struct ParamTraits;
-} // namespace IPC
-
-
-// This is a modified version of gfx::ColorSpace with Skia dependency removed.
-// It is tentatively put inside media2 to avoid introducing new code into
-// ui/gfx. It will be further simplified and merged into media in follow up
-// refactors.
-namespace gfx {
-
-// Used to represent a color space for the purpose of color conversion.
-// This is designed to be safe and compact enough to send over IPC
-// between any processes.
-class ColorSpace {
- public:
- enum PrimaryID {
- // The first 0-255 values should match the H264 specification (see Table E-3
- // Colour Primaries in https://www.itu.int/rec/T-REC-H.264/en).
- kPrimaryIdReserved0 = 0,
- kPrimaryIdBt709 = 1,
- kPrimaryIdUnspecified = 2,
- kPrimaryIdReserved = 3,
- kPrimaryIdBt470M = 4,
- kPrimaryIdBt470Bg = 5,
- kPrimaryIdSmpte170M = 6,
- kPrimaryIdSmpte240M = 7,
- kPrimaryIdFilm = 8,
- kPrimaryIdBt2020 = 9,
- kPrimaryIdSmpteSt4281 = 10,
- kPrimaryIdSmpteSt4312 = 11,
- kPrimaryIdSmpteSt4321 = 12,
-
- kPrimaryIdLastStandardValue = kPrimaryIdSmpteSt4321,
-
- // Chrome-specific values start at 1000.
- kPrimaryIdUnknown = 1000,
- kPrimaryIdXyzD50,
- kPrimaryIdCustom,
- kPrimaryIdLast = kPrimaryIdCustom
- };
-
- enum TransferID {
- // The first 0-255 values should match the H264 specification (see Table E-4
- // Transfer Characteristics in https://www.itu.int/rec/T-REC-H.264/en).
- kTransferIdReserved0 = 0,
- kTransferIdBt709 = 1,
- kTransferIdUnspecified = 2,
- kTransferIdReserved = 3,
- kTransferIdGamma22 = 4,
- kTransferIdGamma28 = 5,
- kTransferIdSmpte170M = 6,
- kTransferIdSmpte240M = 7,
- kTransferIdLinear = 8,
- kTransferIdLog = 9,
- kTransferIdLogSqrt = 10,
- kTransferIdIec6196624 = 11,
- kTransferIdBt1361Ecg = 12,
- kTransferIdIec6196621 = 13,
- kTransferId10BitBt2020 = 14,
- kTransferId12BitBt2020 = 15,
- kTransferIdSmpteSt2084 = 16,
- kTransferIdSmpteSt4281 = 17,
- kTransferIdAribStdB67 = 18, // AKA hybrid-log gamma, HLG.
-
- kTransferIdLastStandardValue = kTransferIdSmpteSt4281,
-
- // Chrome-specific values start at 1000.
- kTransferIdUnknown = 1000,
- kTransferIdGamma24,
-
- // This is an ad-hoc transfer function that decodes SMPTE 2084 content
- // into a 0-1 range more or less suitable for viewing on a non-hdr
- // display.
- kTransferIdSmpteSt2084NonHdr,
-
- // TODO(hubbe): Need to store an approximation of the gamma function(s).
- kTransferIdCustom,
- kTransferIdLast = kTransferIdCustom,
- };
-
- enum MatrixID {
- // The first 0-255 values should match the H264 specification (see Table E-5
- // Matrix Coefficients in https://www.itu.int/rec/T-REC-H.264/en).
- kMatrixIdRgb = 0,
- kMatrixIdBt709 = 1,
- kMatrixIdUnspecified = 2,
- kMatrixIdReserved = 3,
- kMatrixIdFcc = 4,
- kMatrixIdBt470Bg = 5,
- kMatrixIdSmpte170M = 6,
- kMatrixIdSmpte240M = 7,
- kMatrixIdYCgCo = 8,
- kMatrixIdBt2020NonconstantLuminance = 9,
- kMatrixIdBt2020ConstantLuminance = 10,
- kMatrixIdYDzDx = 11,
-
- kMatrixIdLastStandardValue = kMatrixIdYDzDx,
-
- // Chrome-specific values start at 1000
- kMatrixIdUnknown = 1000,
- kMatrixIdLast = kMatrixIdUnknown,
- };
-
- // This corresponds to the WebM Range enum which is part of WebM color data
- // (see http://www.webmproject.org/docs/container/#Range).
- // H.264 only uses a bool, which corresponds to the LIMITED/FULL values.
- // Chrome-specific values start at 1000.
- enum RangeID {
- // Range is not explicitly specified / unknown.
- kRangeIdUnspecified = 0,
-
- // Limited Rec. 709 color range with RGB values ranging from 16 to 235.
- kRangeIdLimited = 1,
-
- // Full RGB color range with RGB values from 0 to 255.
- kRangeIdFull = 2,
-
- // Range is defined by TransferID/MatrixID.
- kRangeIdDerived = 3,
-
- kRangeIdLast = kRangeIdDerived
- };
-
- ColorSpace();
- ColorSpace(PrimaryID primaries,
- TransferID transfer,
- MatrixID matrix,
- RangeID full_range);
- ColorSpace(const ColorSpace& other);
- ColorSpace(int primaries, int transfer, int matrix, RangeID full_range);
- ~ColorSpace();
-
- typedef float CustomPrimaryMatrix[12];
-
- static PrimaryID PrimaryIDFromInt(int primary_id);
- static TransferID TransferIDFromInt(int transfer_id);
- static MatrixID MatrixIDFromInt(int matrix_id);
-
- static ColorSpace CreateXYZD50();
-
- // TODO: Remove these, and replace with more generic constructors.
- static ColorSpace CreateJpeg();
- static ColorSpace CreateREC601();
- static ColorSpace CreateREC709();
-
- bool operator==(const ColorSpace& other) const;
- bool operator!=(const ColorSpace& other) const;
- bool operator<(const ColorSpace& other) const;
-
- PrimaryID primaries() const { return primaries_; }
- TransferID transfer() const { return transfer_; }
- MatrixID matrix() const { return matrix_; }
- RangeID range() const { return range_; }
-
- const CustomPrimaryMatrix& custom_primary_matrix() const {
- DCHECK_EQ(primaries_, kPrimaryIdCustom);
- return custom_primary_matrix_;
- }
-
- private:
- PrimaryID primaries_;
- TransferID transfer_;
- MatrixID matrix_;
- RangeID range_;
-
- // Only used if primaries_ == kPrimaryIdCustom
- float custom_primary_matrix_[12];
-};
-
-} // namespace gfx
-
-#endif // COBALT_MEDIA_BASE_COLOR_SPACE_H_
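Below is a small hedged sketch of how the deleted gfx::ColorSpace was constructed, either through the named factory helpers or directly from H.264 VUI-style integer codes via the int constructor; both forms shown map to the same BT.709 limited-range space, so operator== compares equal.

#include "media/base/color_space.h"

void ExampleColorSpaceUsage() {
  gfx::ColorSpace rec709 = gfx::ColorSpace::CreateREC709();
  gfx::ColorSpace from_bitstream(/*primaries=*/1, /*transfer=*/1,
                                 /*matrix=*/1, gfx::ColorSpace::kRangeIdLimited);
  bool same = (rec709 == from_bitstream);  // true: both are BT.709, limited range
}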
diff --git a/src/media/base/data_buffer.cc b/src/media/base/data_buffer.cc
deleted file mode 100644
index 28ca491..0000000
--- a/src/media/base/data_buffer.cc
+++ /dev/null
@@ -1,67 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "media/base/data_buffer.h"
-
-#include "base/logging.h"
-
-namespace media {
-
-DataBuffer::DataBuffer(scoped_array<uint8> buffer, int buffer_size)
- : Buffer(base::TimeDelta(), base::TimeDelta()),
- data_(buffer.Pass()),
- buffer_size_(buffer_size),
- data_size_(buffer_size) {
-}
-
-DataBuffer::DataBuffer(int buffer_size)
- : Buffer(base::TimeDelta(), base::TimeDelta()),
- buffer_size_(buffer_size),
- data_size_(0) {
- Initialize();
-}
-
-DataBuffer::DataBuffer(const uint8* data, int data_size)
- : Buffer(base::TimeDelta(), base::TimeDelta()),
- buffer_size_(data_size),
- data_size_(data_size) {
- Initialize();
- memcpy(data_.get(), data, data_size_);
-}
-
-DataBuffer::~DataBuffer() {}
-
-void DataBuffer::Initialize() {
- // Prevent arbitrary pointers.
- if (buffer_size_ <= 0) {
- buffer_size_ = data_size_ = 0;
- data_.reset();
- return;
- }
-
- data_.reset(new uint8[buffer_size_]);
-}
-
-const uint8* DataBuffer::GetData() const {
- return data_.get();
-}
-
-int DataBuffer::GetDataSize() const {
- return data_size_;
-}
-
-uint8* DataBuffer::GetWritableData() {
- return data_.get();
-}
-
-void DataBuffer::SetDataSize(int data_size) {
- DCHECK_LE(data_size, buffer_size_);
- data_size_ = data_size;
-}
-
-int DataBuffer::GetBufferSize() const {
- return buffer_size_;
-}
-
-} // namespace media
diff --git a/src/media/base/data_buffer.h b/src/media/base/data_buffer.h
deleted file mode 100644
index 96e9af5..0000000
--- a/src/media/base/data_buffer.h
+++ /dev/null
@@ -1,61 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// A simple implementation of Buffer that takes ownership of the given data
-// pointer.
-//
-// DataBuffer assumes that memory was allocated with new uint8[].
-
-#ifndef MEDIA_BASE_DATA_BUFFER_H_
-#define MEDIA_BASE_DATA_BUFFER_H_
-
-#include "base/memory/scoped_ptr.h"
-#include "media/base/buffers.h"
-
-namespace media {
-
-class MEDIA_EXPORT DataBuffer : public Buffer {
- public:
- // Assumes valid data of size |buffer_size|.
- DataBuffer(scoped_array<uint8> buffer, int buffer_size);
-
- // Allocates buffer of size |buffer_size|. If |buffer_size| is 0, |data_| is
- // set to a NULL ptr.
- explicit DataBuffer(int buffer_size);
-
- // Allocates buffer of size |data_size|, copies [data,data+data_size) to
- // the allocated buffer and sets data size to |data_size|.
- DataBuffer(const uint8* data, int data_size);
-
- // Buffer implementation.
- virtual const uint8* GetData() const OVERRIDE;
- virtual int GetDataSize() const OVERRIDE;
-
- // Returns a read-write pointer to the buffer data.
- virtual uint8* GetWritableData();
-
- // Updates the size of valid data in bytes, which must be less than or equal
- // to GetBufferSize().
- virtual void SetDataSize(int data_size);
-
- // Returns the size of the underlying buffer.
- virtual int GetBufferSize() const;
-
- protected:
- virtual ~DataBuffer();
-
- private:
- // Constructor helper method for memory allocations.
- void Initialize();
-
- scoped_array<uint8> data_;
- int buffer_size_;
- int data_size_;
-
- DISALLOW_COPY_AND_ASSIGN(DataBuffer);
-};
-
-} // namespace media
-
-#endif // MEDIA_BASE_DATA_BUFFER_H_
diff --git a/src/media/base/data_buffer_unittest.cc b/src/media/base/data_buffer_unittest.cc
deleted file mode 100644
index 71d8389..0000000
--- a/src/media/base/data_buffer_unittest.cc
+++ /dev/null
@@ -1,63 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/string_util.h"
-#include "media/base/data_buffer.h"
-#include "testing/gtest/include/gtest/gtest.h"
-
-namespace media {
-
-TEST(DataBufferTest, Constructors) {
- const uint8 kTestData[] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77 };
- const int kTestDataSize = arraysize(kTestData);
-
- scoped_refptr<DataBuffer> buffer(new DataBuffer(0));
- EXPECT_FALSE(buffer->GetData());
-
- scoped_refptr<DataBuffer> buffer2(new DataBuffer(kTestDataSize));
- EXPECT_EQ(0, buffer2->GetDataSize());
- EXPECT_EQ(kTestDataSize, buffer2->GetBufferSize());
-
- scoped_refptr<DataBuffer> buffer3(new DataBuffer(kTestData, kTestDataSize));
- EXPECT_EQ(kTestDataSize, buffer3->GetDataSize());
- EXPECT_EQ(kTestDataSize, buffer3->GetBufferSize());
- ASSERT_EQ(0, memcmp(buffer3->GetData(), kTestData, kTestDataSize));
- // Ensure we are copying the data, not just pointing to the original data.
- buffer3->GetWritableData()[0] = 0xFF;
- ASSERT_NE(0, memcmp(buffer3->GetData(), kTestData, kTestDataSize));
-}
-
-TEST(DataBufferTest, ReadingWriting) {
- const char kData[] = "hello";
- const int kDataSize = arraysize(kData);
- const char kNewData[] = "chromium";
- const int kNewDataSize = arraysize(kNewData);
-
- // Create a DataBuffer.
- scoped_refptr<DataBuffer> buffer(new DataBuffer(kDataSize));
- ASSERT_TRUE(buffer);
-
- uint8* data = buffer->GetWritableData();
- ASSERT_TRUE(data);
- ASSERT_EQ(kDataSize, buffer->GetBufferSize());
- memcpy(data, kData, kDataSize);
- buffer->SetDataSize(kDataSize);
- const uint8* read_only_data = buffer->GetData();
- ASSERT_EQ(data, read_only_data);
- ASSERT_EQ(0, memcmp(read_only_data, kData, kDataSize));
- EXPECT_FALSE(buffer->IsEndOfStream());
-
- scoped_refptr<DataBuffer> buffer2(new DataBuffer(kNewDataSize + 10));
- data = buffer2->GetWritableData();
- ASSERT_TRUE(data);
- ASSERT_EQ(kNewDataSize + 10, buffer2->GetBufferSize());
- memcpy(data, kNewData, kNewDataSize);
- buffer2->SetDataSize(kNewDataSize);
- read_only_data = buffer2->GetData();
- EXPECT_EQ(kNewDataSize, buffer2->GetDataSize());
- ASSERT_EQ(data, read_only_data);
- EXPECT_EQ(0, memcmp(read_only_data, kNewData, kNewDataSize));
-}
-
-} // namespace media
diff --git a/src/media/base/data_source.cc b/src/media/base/data_source.cc
deleted file mode 100644
index c25f9e7..0000000
--- a/src/media/base/data_source.cc
+++ /dev/null
@@ -1,30 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "media/base/data_source.h"
-
-#include "base/logging.h"
-
-namespace media {
-
-// static
-const int DataSource::kReadError = -1;
-
-DataSourceHost::~DataSourceHost() {}
-
-DataSource::DataSource() : host_(NULL) {}
-
-DataSource::~DataSource() {}
-
-void DataSource::set_host(DataSourceHost* host) {
- DCHECK(host);
- DCHECK(!host_);
- host_ = host;
-}
-
-void DataSource::SetPlaybackRate(float playback_rate) {}
-
-DataSourceHost* DataSource::host() { return host_; }
-
-} // namespace media
diff --git a/src/media/base/data_source.h b/src/media/base/data_source.h
deleted file mode 100644
index 8de2213..0000000
--- a/src/media/base/data_source.h
+++ /dev/null
@@ -1,80 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef MEDIA_BASE_DATA_SOURCE_H_
-#define MEDIA_BASE_DATA_SOURCE_H_
-
-#include "base/callback.h"
-#include "base/time.h"
-#include "media/base/media_export.h"
-
-namespace media {
-
-class MEDIA_EXPORT DataSourceHost {
- public:
- // Set the total size of the media file.
- virtual void SetTotalBytes(int64 total_bytes) = 0;
-
- // Notify the host that byte range [start,end] has been buffered.
- // TODO(fischman): remove this method when demuxing is push-based instead of
- // pull-based. http://crbug.com/131444
- virtual void AddBufferedByteRange(int64 start, int64 end) = 0;
-
- // Notify the host that time range [start,end] has been buffered.
- virtual void AddBufferedTimeRange(base::TimeDelta start,
- base::TimeDelta end) = 0;
-
- protected:
- virtual ~DataSourceHost();
-};
-
-class MEDIA_EXPORT DataSource {
- public:
- typedef base::Callback<void(int64, int64)> StatusCallback;
- typedef base::Callback<void(int)> ReadCB;
- static const int kReadError;
-
- DataSource();
-
- virtual void set_host(DataSourceHost* host);
-
- // Reads |size| bytes from |position| into |data|. When the read completes or
- // fails, |read_cb| is called with the number of bytes read, or kReadError in
- // case of error.
- virtual void Read(int64 position, int size, uint8* data,
- const DataSource::ReadCB& read_cb) = 0;
-
- // Notifies the DataSource of a change in the current playback rate.
- virtual void SetPlaybackRate(float playback_rate);
-
- // Stops the DataSource. Once this is called all future Read() calls will
- // return an error.
- virtual void Stop() = 0;
-
- // Returns true and sets |size_out| to the file size, or returns false if the
- // file size could not be retrieved.
- virtual bool GetSize(int64* size_out) = 0;
-
- // Returns true if we are performing streaming. In this case seeking is
- // not possible.
- virtual bool IsStreaming() = 0;
-
- // Notify the DataSource of the bitrate of the media.
- // Values of |bitrate| <= 0 are invalid and should be ignored.
- virtual void SetBitrate(int bitrate) = 0;
-
- protected:
- virtual ~DataSource();
-
- DataSourceHost* host();
-
- private:
- DataSourceHost* host_;
-
- DISALLOW_COPY_AND_ASSIGN(DataSource);
-};
-
-} // namespace media
-
-#endif // MEDIA_BASE_DATA_SOURCE_H_
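As a hedged illustration of the asynchronous read contract defined by the deleted DataSource interface: Read() completes through a ReadCB that receives either the byte count or kReadError. OnReadDone and the 16-byte header read are hypothetical names used only for this sketch.

#include "base/bind.h"
#include "media/base/data_source.h"

static void OnReadDone(int bytes_read) {
  if (bytes_read == media::DataSource::kReadError) {
    // Handle the failed read; all reads also fail once Stop() has been called.
    return;
  }
  // |bytes_read| bytes are now valid in the destination buffer.
}

static void ReadHeader(media::DataSource* source, uint8* dest) {
  source->Read(/*position=*/0, /*size=*/16, dest, base::Bind(&OnReadDone));
}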
diff --git a/src/media/base/decoder_buffer.cc b/src/media/base/decoder_buffer.cc
deleted file mode 100644
index 14c4151..0000000
--- a/src/media/base/decoder_buffer.cc
+++ /dev/null
@@ -1,155 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "media/base/decoder_buffer.h"
-
-#include "base/logging.h"
-#include "media/base/decrypt_config.h"
-
-#if defined(__LB_SHELL__) || defined(COBALT)
-#include "base/debug/trace_event.h"
-#include "media/base/shell_buffer_factory.h"
-#endif // defined(__LB_SHELL__) || defined(COBALT)
-
-#if !defined(OS_ANDROID)
-#include "base/memory/aligned_memory.h"
-#endif
-
-namespace media {
-
-#if defined(__LB_SHELL__) || defined(COBALT)
-
-// static
-scoped_refptr<DecoderBuffer> DecoderBuffer::CreateEOSBuffer(
- base::TimeDelta timestamp) {
- scoped_refptr<DecoderBuffer> eos =
- scoped_refptr<DecoderBuffer>(new DecoderBuffer(NULL, 0, true));
- eos->SetTimestamp(timestamp);
- return eos;
-}
-
-void DecoderBuffer::ShrinkTo(int size) {
- CHECK_LE(size, GetAllocatedSize());
- size_ = size;
-}
-
-DecoderBuffer::DecoderBuffer(uint8* reusable_buffer,
- size_t size,
- bool is_keyframe)
- : Buffer(kNoTimestamp(), kInfiniteDuration()),
- buffer_(reusable_buffer),
- size_(size),
- allocated_size_(size),
- is_decrypted_(false),
- is_keyframe_(is_keyframe) {
- if (buffer_) {
- // Retain a reference to the buffer factory, to ensure that we do not
- // outlive it.
- buffer_factory_ = ShellBufferFactory::Instance();
- }
-}
-
-DecoderBuffer::~DecoderBuffer() {
- // recycle our buffer
- if (buffer_) {
- TRACE_EVENT1("media_stack", "DecoderBuffer::~DecoderBuffer()",
- "timestamp", GetTimestamp().InMicroseconds());
- DCHECK_NE(buffer_factory_, (ShellBufferFactory*)NULL);
- buffer_factory_->Reclaim(buffer_);
- }
-}
-
-const DecryptConfig* DecoderBuffer::GetDecryptConfig() const {
- DCHECK(!IsEndOfStream());
- return decrypt_config_.get();
-}
-
-void DecoderBuffer::SetDecryptConfig(scoped_ptr<DecryptConfig> decrypt_config) {
- DCHECK(!IsEndOfStream());
- decrypt_config_ = decrypt_config.Pass();
-}
-
-void DecoderBuffer::SetBuffer(uint8* reusable_buffer) {
- buffer_ = reusable_buffer;
- if (buffer_) {
- // Retain a reference to the buffer factory, to ensure that we do not
- // outlive it.
- buffer_factory_ = ShellBufferFactory::Instance();
- }
-}
-
-#else // defined(__LB_SHELL__) || defined(COBALT)
-
-DecoderBuffer::DecoderBuffer(int buffer_size)
- : Buffer(base::TimeDelta(), base::TimeDelta()),
- buffer_size_(buffer_size) {
- Initialize();
-}
-
-DecoderBuffer::DecoderBuffer(const uint8* data, int buffer_size)
- : Buffer(base::TimeDelta(), base::TimeDelta()),
- buffer_size_(buffer_size) {
- // Prevent invalid allocations. Also used to create end of stream buffers.
- if (!data) {
- buffer_size_ = 0;
- data_ = NULL;
- return;
- }
-
- Initialize();
- memcpy(data_, data, buffer_size_);
-}
-
-DecoderBuffer::~DecoderBuffer() {
-#if !defined(OS_ANDROID)
- base::AlignedFree(data_);
-#else
- delete[] data_;
-#endif
-}
-
-void DecoderBuffer::Initialize() {
- DCHECK_GE(buffer_size_, 0);
-#if !defined(OS_ANDROID)
- data_ = reinterpret_cast<uint8*>(
- base::AlignedAlloc(buffer_size_ + kPaddingSize, kAlignmentSize));
- memset(data_ + buffer_size_, 0, kPaddingSize);
-#else
- data_ = new uint8[buffer_size_];
-#endif
-}
-
-scoped_refptr<DecoderBuffer> DecoderBuffer::CopyFrom(const uint8* data,
- int data_size) {
- DCHECK(data);
- return make_scoped_refptr(new DecoderBuffer(data, data_size));
-}
-
-scoped_refptr<DecoderBuffer> DecoderBuffer::CreateEOSBuffer() {
- return make_scoped_refptr(new DecoderBuffer(NULL, 0));
-}
-
-const uint8* DecoderBuffer::GetData() const {
- return data_;
-}
-
-int DecoderBuffer::GetDataSize() const {
- return buffer_size_;
-}
-
-uint8* DecoderBuffer::GetWritableData() {
- return data_;
-}
-
-const DecryptConfig* DecoderBuffer::GetDecryptConfig() const {
- return decrypt_config_.get();
-}
-
-void DecoderBuffer::SetDecryptConfig(scoped_ptr<DecryptConfig> decrypt_config) {
- decrypt_config_ = decrypt_config.Pass();
-}
-
-#endif // defined(__LB_SHELL__) || defined(COBALT)
-
-} // namespace media
diff --git a/src/media/base/decoder_buffer.h b/src/media/base/decoder_buffer.h
deleted file mode 100644
index 16f5cce..0000000
--- a/src/media/base/decoder_buffer.h
+++ /dev/null
@@ -1,135 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// A specialized buffer for interfacing with audio / video decoders.
-//
-// Specifically ensures that data is aligned and padded as necessary by the
-// underlying decoding framework. On desktop platforms this means memory is
-// allocated using FFmpeg with particular alignment and padding requirements.
-//
-// Also includes decoder specific functionality for decryption.
-
-#ifndef MEDIA_BASE_DECODER_BUFFER_H_
-#define MEDIA_BASE_DECODER_BUFFER_H_
-
-#include "base/memory/scoped_ptr.h"
-#include "build/build_config.h"
-#include "media/base/buffers.h"
-#include "media/base/decrypt_config.h"
-
-namespace media {
-
-#if defined(__LB_SHELL__) || defined(COBALT)
-
-class DecryptConfig;
-class ShellBufferFactory;
-
-class MEDIA_EXPORT DecoderBuffer : public Buffer {
- public:
- // Create a DecoderBuffer indicating we've reached end of stream or an error.
- // GetData() and GetWritableData() return NULL and GetDataSize() returns 0.
- static scoped_refptr<DecoderBuffer> CreateEOSBuffer(
- base::TimeDelta timestamp);
-
- // Buffer implementation.
- const uint8* GetData() const OVERRIDE { return buffer_; }
- // Data size can be less than allocated size after ShrinkTo is called.
- int GetDataSize() const OVERRIDE { return static_cast<int>(size_); }
-
- int GetAllocatedSize() const { return static_cast<int>(allocated_size_); }
- // This is used for data whose exact size is not known before reading.
- void ShrinkTo(int size);
- bool IsKeyframe() const { return is_keyframe_; }
-
- // Returns a read-write pointer to the buffer data.
- virtual uint8* GetWritableData() { return buffer_; }
-
- // Returns a flag indicating whether or not the buffer has been decrypted
- // in-place. If so, a CDM should avoid decrypting it again after a seek.
- bool IsAlreadyDecrypted() { return is_decrypted_; }
- void SetAlreadyDecrypted(bool value) { is_decrypted_ = value; }
-
- virtual const DecryptConfig* GetDecryptConfig() const;
- virtual void SetDecryptConfig(scoped_ptr<DecryptConfig> decrypt_config);
-
- protected:
- friend class ShellBufferFactory;
- // Should only be called by ShellBufferFactory; consumers should use
- // ShellBufferFactory::AllocateBuffer to make a DecoderBuffer.
- DecoderBuffer(uint8* reusable_buffer, size_t size, bool is_keyframe);
- // For deferred allocation, create a shell buffer with a NULL buffer_ but a
- // non-zero size, then use the SetBuffer() method below to actually set the
- // reusable buffer pointer when it becomes available.
- void SetBuffer(uint8* reusable_buffer);
-
- virtual ~DecoderBuffer();
- uint8* buffer_;
- size_t size_;
- size_t allocated_size_;
- scoped_refptr<ShellBufferFactory> buffer_factory_;
- scoped_ptr<DecryptConfig> decrypt_config_;
- bool is_decrypted_;
- bool is_keyframe_;
-
- DISALLOW_IMPLICIT_CONSTRUCTORS(DecoderBuffer);
-};
-
-#else // defined(__LB_SHELL__) || defined(COBALT)
-
-class MEDIA_EXPORT DecoderBuffer : public Buffer {
- public:
- enum {
- kPaddingSize = 16,
-#if defined(ARCH_CPU_ARM_FAMILY)
- kAlignmentSize = 16
-#else
- kAlignmentSize = 32
-#endif
- };
-
- // Allocates buffer of size |buffer_size| >= 0. Buffer will be padded and
- // aligned as necessary.
- explicit DecoderBuffer(int buffer_size);
-
- // Create a DecoderBuffer whose |data_| is copied from |data|. Buffer will be
- // padded and aligned as necessary. |data| must not be NULL and |size| >= 0.
- static scoped_refptr<DecoderBuffer> CopyFrom(const uint8* data, int size);
-
- // Create a DecoderBuffer indicating we've reached end of stream. GetData()
- // and GetWritableData() will return NULL and GetDataSize() will return 0.
- static scoped_refptr<DecoderBuffer> CreateEOSBuffer();
-
- // Buffer implementation.
- virtual const uint8* GetData() const OVERRIDE;
- virtual int GetDataSize() const OVERRIDE;
-
- // Returns a read-write pointer to the buffer data.
- virtual uint8* GetWritableData();
-
- virtual const DecryptConfig* GetDecryptConfig() const;
- virtual void SetDecryptConfig(scoped_ptr<DecryptConfig> decrypt_config);
-
- protected:
- // Allocates a buffer of size |size| >= 0 and copies |data| into it. Buffer
- // will be padded and aligned as necessary. If |data| is NULL then |data_| is
- // set to NULL and |buffer_size_| to 0.
- DecoderBuffer(const uint8* data, int size);
- virtual ~DecoderBuffer();
-
- private:
- int buffer_size_;
- uint8* data_;
- scoped_ptr<DecryptConfig> decrypt_config_;
-
- // Constructor helper method for memory allocations.
- void Initialize();
-
- DISALLOW_COPY_AND_ASSIGN(DecoderBuffer);
-};
-
-#endif // defined(__LB_SHELL__) || defined(COBALT)
-
-} // namespace media
-
-#endif // MEDIA_BASE_DECODER_BUFFER_H_
diff --git a/src/media/base/decoder_buffer_pool.cc b/src/media/base/decoder_buffer_pool.cc
deleted file mode 100644
index 0d52b2a..0000000
--- a/src/media/base/decoder_buffer_pool.cc
+++ /dev/null
@@ -1,54 +0,0 @@
-/*
- * Copyright 2015 Google Inc. All Rights Reserved.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "media/base/decoder_buffer_pool.h"
-
-#include "base/logging.h"
-#include "media/base/shell_buffer_factory.h"
-
-namespace media {
-
-DecoderBufferPool::DecoderBufferPool(uint32 sample_size_in_bytes) {
- uint32 buffer_size = kMaxSamplesPerBuffer * sample_size_in_bytes;
- while (decoder_buffers_.size() < kBufferCount) {
- decoder_buffers_.push_back(AllocateFromShellBufferFactory(buffer_size));
- DCHECK(decoder_buffers_.back());
- }
-}
-
-scoped_refptr<DecoderBuffer> DecoderBufferPool::Allocate(size_t size_in_bytes) {
- for (DecoderBuffers::iterator iter = decoder_buffers_.begin();
- iter != decoder_buffers_.end(); ++iter) {
- if ((*iter)->HasOneRef()) {
- DCHECK_LE(size_in_bytes, (*iter)->GetAllocatedSize());
- if (size_in_bytes <= (*iter)->GetAllocatedSize()) {
- (*iter)->ShrinkTo(size_in_bytes);
- return *iter;
- }
- break;
- }
- }
- NOTREACHED();
- return AllocateFromShellBufferFactory(size_in_bytes);
-}
-
-scoped_refptr<DecoderBuffer> DecoderBufferPool::AllocateFromShellBufferFactory(
- size_t size_in_bytes) {
- return ShellBufferFactory::Instance()->AllocateBufferNow(size_in_bytes,
- false);
-}
-
-} // namespace media
diff --git a/src/media/base/decoder_buffer_pool.h b/src/media/base/decoder_buffer_pool.h
deleted file mode 100644
index 2b2636b..0000000
--- a/src/media/base/decoder_buffer_pool.h
+++ /dev/null
@@ -1,55 +0,0 @@
-/*
- * Copyright 2015 Google Inc. All Rights Reserved.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef MEDIA_BASE_DECODER_BUFFER_POOL_H_
-#define MEDIA_BASE_DECODER_BUFFER_POOL_H_
-
-#include <vector>
-
-#include "base/memory/ref_counted.h"
-#include "media/base/decoder_buffer.h"
-#include "media/mp4/aac.h"
-
-namespace media {
-
-// This class is currently used by classes inherited from ShellRawAudioDecoder.
-// These classes may need to allocate DecoderBuffer from ShellBufferFactory
-// during playback. Our progressive demuxer will try to use up all free space
-// of ShellBufferFactory so the allocation made by ShellRawAudioDecoder may
-// fail. This class pre-allocates DecoderBuffers at playback start to ensure
-// that the raw audio decoder always has buffers to use.
-class DecoderBufferPool {
- public:
- static const uint32 kMaxAudioChannels = 8; // We support 7.1 at most.
- static const uint32 kMaxSamplesPerBuffer =
- mp4::AAC::kFramesPerAccessUnit * kMaxAudioChannels;
- static const size_t kBufferCount = 48;
-
- DecoderBufferPool(uint32 sample_size_in_bytes);
- scoped_refptr<DecoderBuffer> Allocate(size_t size_in_bytes);
-
- private:
- typedef std::vector<scoped_refptr<DecoderBuffer> > DecoderBuffers;
-
- scoped_refptr<DecoderBuffer> AllocateFromShellBufferFactory(
- size_t size_in_bytes);
-
- DecoderBuffers decoder_buffers_;
-};
-
-} // namespace media
-
-#endif // MEDIA_BASE_DECODER_BUFFER_POOL_H_
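A brief sketch, under the assumptions in the class comment above, of how a raw audio decoder would use the deleted DecoderBufferPool: the pool is sized from the PCM sample size at playback start, and each Allocate() hands back one of the pre-allocated DecoderBuffers shrunk to the requested size. The 1024-frame stereo request is an illustrative number, not taken from the source.

#include "media/base/decoder_buffer_pool.h"

void ExamplePoolUsage() {
  media::DecoderBufferPool pool(sizeof(int16));  // 16-bit PCM samples
  scoped_refptr<media::DecoderBuffer> buffer =
      pool.Allocate(1024 * 2 * sizeof(int16));   // 1024 frames, 2 channels
  // |buffer| remains owned by the pool; it becomes reusable again once the
  // caller drops its reference (HasOneRef() is true).
}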
diff --git a/src/media/base/decoder_buffer_queue.cc b/src/media/base/decoder_buffer_queue.cc
deleted file mode 100644
index ad91c37..0000000
--- a/src/media/base/decoder_buffer_queue.cc
+++ /dev/null
@@ -1,78 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "media/base/decoder_buffer_queue.h"
-
-#include "base/logging.h"
-#include "media/base/decoder_buffer.h"
-
-namespace media {
-
-DecoderBufferQueue::DecoderBufferQueue()
- : earliest_valid_timestamp_(kNoTimestamp()) {
-}
-
-DecoderBufferQueue::~DecoderBufferQueue() {}
-
-void DecoderBufferQueue::Push(const scoped_refptr<DecoderBuffer>& buffer) {
- CHECK(!buffer->IsEndOfStream());
-
- queue_.push_back(buffer);
-
- // TODO(scherkus): FFmpeg returns some packets with no timestamp after
- // seeking. Fix and turn this into CHECK(). See http://crbug.com/162192
- if (buffer->GetTimestamp() == kNoTimestamp()) {
- DVLOG(1) << "Buffer has no timestamp";
- return;
- }
-
- if (earliest_valid_timestamp_ == kNoTimestamp()) {
- earliest_valid_timestamp_ = buffer->GetTimestamp();
- }
-
- if (buffer->GetTimestamp() < earliest_valid_timestamp_) {
- DVLOG(1)
- << "Out of order timestamps: "
- << buffer->GetTimestamp().InMicroseconds()
- << " vs. "
- << earliest_valid_timestamp_.InMicroseconds();
- return;
- }
-
- earliest_valid_timestamp_ = buffer->GetTimestamp();
- in_order_queue_.push_back(buffer);
-}
-
-scoped_refptr<DecoderBuffer> DecoderBufferQueue::Pop() {
- scoped_refptr<DecoderBuffer> buffer = queue_.front();
- queue_.pop_front();
-
- if (!in_order_queue_.empty() &&
- in_order_queue_.front() == buffer) {
- in_order_queue_.pop_front();
- }
-
- return buffer;
-}
-
-void DecoderBufferQueue::Clear() {
- queue_.clear();
- in_order_queue_.clear();
- earliest_valid_timestamp_ = kNoTimestamp();
-}
-
-bool DecoderBufferQueue::IsEmpty() {
- return queue_.empty();
-}
-
-base::TimeDelta DecoderBufferQueue::Duration() {
- if (in_order_queue_.size() < 2)
- return base::TimeDelta();
-
- base::TimeDelta start = in_order_queue_.front()->GetTimestamp();
- base::TimeDelta end = in_order_queue_.back()->GetTimestamp();
- return end - start;
-}
-
-} // namespace media
diff --git a/src/media/base/decoder_buffer_queue.h b/src/media/base/decoder_buffer_queue.h
deleted file mode 100644
index f75046c..0000000
--- a/src/media/base/decoder_buffer_queue.h
+++ /dev/null
@@ -1,70 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef MEDIA_BASE_DECODER_BUFFER_QUEUE_H_
-#define MEDIA_BASE_DECODER_BUFFER_QUEUE_H_
-
-#include <deque>
-
-#include "base/memory/ref_counted.h"
-#include "base/time.h"
-#include "media/base/media_export.h"
-
-namespace media {
-
-class DecoderBuffer;
-
-// Maintains a queue of DecoderBuffers in increasing timestamp order.
-//
-// Individual buffer durations are ignored when calculating the duration of the
-// queue, i.e., the queue must have at least 2 in-order buffers to calculate
-// duration.
-//
-// Not thread safe: access must be externally synchronized.
-class MEDIA_EXPORT DecoderBufferQueue {
- public:
- DecoderBufferQueue();
- ~DecoderBufferQueue();
-
- // Push |buffer| to the end of the queue. If |buffer| is queued out of order
- // it will be excluded from duration calculations.
- //
- // It is invalid to push an end-of-stream |buffer|.
- void Push(const scoped_refptr<DecoderBuffer>& buffer);
-
- // Pops a DecoderBuffer from the front of the queue.
- //
- // It is invalid to call Pop() on an empty queue.
- scoped_refptr<DecoderBuffer> Pop();
-
- // Removes all queued buffers.
- void Clear();
-
- // Returns true if this queue is empty.
- bool IsEmpty();
-
- // Returns the duration of encoded data stored in this queue as measured by
- // the timestamps of the earliest and latest buffers, ignoring out of order
- // buffers.
- //
- // Returns zero if the queue is empty.
- base::TimeDelta Duration();
-
- private:
- typedef std::deque<scoped_refptr<DecoderBuffer> > Queue;
- Queue queue_;
-
- // A subset of |queue_| that contains buffers that are in strictly
- // increasing timestamp order. Used to calculate Duration() while ignoring
- // out-of-order buffers.
- Queue in_order_queue_;
-
- base::TimeDelta earliest_valid_timestamp_;
-
- DISALLOW_COPY_AND_ASSIGN(DecoderBufferQueue);
-};
-
-} // namespace media
-
-#endif // MEDIA_BASE_DECODER_BUFFER_QUEUE_H_
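To make the in-order bookkeeping concrete, here is a hedged sketch of the deleted DecoderBufferQueue behaviour; CreateBuffer stands in for the helper defined in the unit test that follows and is not part of the queue API itself.

#include "media/base/decoder_buffer.h"
#include "media/base/decoder_buffer_queue.h"

void ExampleQueueUsage() {
  media::DecoderBufferQueue queue;
  queue.Push(CreateBuffer(10));   // CreateBuffer() as in the unit test below
  queue.Push(CreateBuffer(12));   // Duration() == 2 seconds
  queue.Push(CreateBuffer(8));    // out of order: excluded from Duration()
  scoped_refptr<media::DecoderBuffer> next = queue.Pop();  // FIFO: t=10 buffer
  queue.Clear();                  // empties both queues, Duration() back to 0
}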
diff --git a/src/media/base/decoder_buffer_queue_unittest.cc b/src/media/base/decoder_buffer_queue_unittest.cc
deleted file mode 100644
index 02cd541..0000000
--- a/src/media/base/decoder_buffer_queue_unittest.cc
+++ /dev/null
@@ -1,137 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "media/base/decoder_buffer.h"
-#include "media/base/decoder_buffer_queue.h"
-#include "testing/gtest/include/gtest/gtest.h"
-
-namespace media {
-
-static base::TimeDelta ToTimeDelta(int seconds) {
- if (seconds < 0)
- return kNoTimestamp();
- return base::TimeDelta::FromSeconds(seconds);
-}
-
-// Helper to create buffers with specified timestamp in seconds.
-//
-// Negative numbers will be converted to kNoTimestamp().
-static scoped_refptr<DecoderBuffer> CreateBuffer(int timestamp) {
- scoped_refptr<DecoderBuffer> buffer = new DecoderBuffer(0);
- buffer->SetTimestamp(ToTimeDelta(timestamp));
- buffer->SetDuration(ToTimeDelta(0));
- return buffer;
-}
-
-TEST(DecoderBufferQueueTest, IsEmpty) {
- DecoderBufferQueue queue;
- EXPECT_TRUE(queue.IsEmpty());
-
- queue.Push(CreateBuffer(0));
- EXPECT_FALSE(queue.IsEmpty());
-}
-
-TEST(DecoderBufferQueueTest, Clear) {
- DecoderBufferQueue queue;
- queue.Push(CreateBuffer(0));
- queue.Push(CreateBuffer(1));
- EXPECT_FALSE(queue.IsEmpty());
- EXPECT_EQ(1, queue.Duration().InSeconds());
-
- queue.Clear();
- EXPECT_TRUE(queue.IsEmpty());
- EXPECT_EQ(0, queue.Duration().InSeconds());
-}
-
-TEST(DecoderBufferQueueTest, Duration) {
- DecoderBufferQueue queue;
- EXPECT_EQ(0, queue.Duration().InSeconds());
-
- queue.Push(CreateBuffer(0));
- EXPECT_EQ(0, queue.Duration().InSeconds());
-
- queue.Push(CreateBuffer(1));
- EXPECT_EQ(1, queue.Duration().InSeconds());
-
- queue.Push(CreateBuffer(2));
- EXPECT_EQ(2, queue.Duration().InSeconds());
-
- queue.Push(CreateBuffer(4));
- EXPECT_EQ(4, queue.Duration().InSeconds());
-
- queue.Pop();
- EXPECT_EQ(3, queue.Duration().InSeconds());
-
- queue.Pop();
- EXPECT_EQ(2, queue.Duration().InSeconds());
-
- queue.Pop();
- EXPECT_EQ(0, queue.Duration().InSeconds());
-
- queue.Pop();
- EXPECT_EQ(0, queue.Duration().InSeconds());
-}
-
-TEST(DecoderBufferQueueTest, Duration_OutOfOrder) {
- DecoderBufferQueue queue;
- queue.Push(CreateBuffer(10));
- queue.Push(CreateBuffer(12));
- EXPECT_EQ(2, queue.Duration().InSeconds());
-
- // Out of order: duration shouldn't change.
- queue.Push(CreateBuffer(8));
- EXPECT_EQ(2, queue.Duration().InSeconds());
-
- // Removing first buffer should leave the second buffer as the only buffer
- // included in the duration calculation.
- queue.Pop();
- EXPECT_EQ(0, queue.Duration().InSeconds());
-
- // Removing second buffer leaves the out-of-order buffer. It shouldn't be
- // included in duration calculations.
- queue.Pop();
- EXPECT_EQ(0, queue.Duration().InSeconds());
-
- // Push a still-too-early buffer. It shouldn't be included in duration
- // calculations.
- queue.Push(CreateBuffer(11));
- EXPECT_EQ(0, queue.Duration().InSeconds());
-
- // Push a buffer that's after the earliest valid time. It's a singular valid
- // buffer so duration is still zero.
- queue.Push(CreateBuffer(14));
- EXPECT_EQ(0, queue.Duration().InSeconds());
-
- // Push a second valid buffer. We should now have a duration.
- queue.Push(CreateBuffer(17));
- EXPECT_EQ(3, queue.Duration().InSeconds());
-}
-
-TEST(DecoderBufferQueueTest, Duration_NoTimestamp) {
- // Buffers with no timestamp don't affect duration.
- DecoderBufferQueue queue;
- queue.Push(CreateBuffer(0));
- queue.Push(CreateBuffer(4));
- EXPECT_EQ(4, queue.Duration().InSeconds());
-
- queue.Push(CreateBuffer(-1));
- EXPECT_EQ(4, queue.Duration().InSeconds());
-
- queue.Push(CreateBuffer(6));
- EXPECT_EQ(6, queue.Duration().InSeconds());
-
- queue.Pop();
- EXPECT_EQ(2, queue.Duration().InSeconds());
-
- queue.Pop();
- EXPECT_EQ(0, queue.Duration().InSeconds());
-
- queue.Pop();
- EXPECT_EQ(0, queue.Duration().InSeconds());
-
- queue.Pop();
- EXPECT_EQ(0, queue.Duration().InSeconds());
-}
-
-} // namespace media
diff --git a/src/media/base/decoder_buffer_unittest.cc b/src/media/base/decoder_buffer_unittest.cc
deleted file mode 100644
index 65a14b6..0000000
--- a/src/media/base/decoder_buffer_unittest.cc
+++ /dev/null
@@ -1,91 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/string_util.h"
-#include "media/base/decoder_buffer.h"
-#include "testing/gtest/include/gtest/gtest.h"
-
-namespace media {
-
-TEST(DecoderBufferTest, Constructors) {
- scoped_refptr<DecoderBuffer> buffer(new DecoderBuffer(0));
- EXPECT_TRUE(buffer->GetData());
- EXPECT_EQ(0, buffer->GetDataSize());
- EXPECT_FALSE(buffer->IsEndOfStream());
-
- const int kTestSize = 10;
- scoped_refptr<DecoderBuffer> buffer3(new DecoderBuffer(kTestSize));
- ASSERT_TRUE(buffer3);
- EXPECT_EQ(kTestSize, buffer3->GetDataSize());
-}
-
-TEST(DecoderBufferTest, CreateEOSBuffer) {
- scoped_refptr<DecoderBuffer> buffer(DecoderBuffer::CreateEOSBuffer());
- EXPECT_TRUE(buffer->IsEndOfStream());
- EXPECT_FALSE(buffer->GetData());
- EXPECT_EQ(0, buffer->GetDataSize());
-}
-
-TEST(DecoderBufferTest, CopyFrom) {
- const uint8 kData[] = "hello";
- const int kDataSize = arraysize(kData);
- scoped_refptr<DecoderBuffer> buffer2(DecoderBuffer::CopyFrom(
- reinterpret_cast<const uint8*>(&kData), kDataSize));
- ASSERT_TRUE(buffer2);
- EXPECT_NE(kData, buffer2->GetData());
- EXPECT_EQ(buffer2->GetDataSize(), kDataSize);
- EXPECT_EQ(0, memcmp(buffer2->GetData(), kData, kDataSize));
- EXPECT_FALSE(buffer2->IsEndOfStream());
-}
-
-#if !defined(OS_ANDROID)
-TEST(DecoderBufferTest, PaddingAlignment) {
- const uint8 kData[] = "hello";
- const int kDataSize = arraysize(kData);
- scoped_refptr<DecoderBuffer> buffer2(DecoderBuffer::CopyFrom(
- reinterpret_cast<const uint8*>(&kData), kDataSize));
- ASSERT_TRUE(buffer2);
-
- // Padding data should always be zeroed.
-  for (int i = 0; i < DecoderBuffer::kPaddingSize; i++)
- EXPECT_EQ((buffer2->GetData() + kDataSize)[i], 0);
-
- // If the data is padded correctly we should be able to read and write past
- // the end of the data by DecoderBuffer::kPaddingSize bytes without crashing
- // or Valgrind/ASAN throwing errors.
- const uint8 kFillChar = 0xFF;
- memset(
- buffer2->GetWritableData() + kDataSize, kFillChar,
- DecoderBuffer::kPaddingSize);
-  for (int i = 0; i < DecoderBuffer::kPaddingSize; i++)
- EXPECT_EQ((buffer2->GetData() + kDataSize)[i], kFillChar);
-
- EXPECT_EQ(0u, reinterpret_cast<uintptr_t>(
- buffer2->GetData()) & (DecoderBuffer::kAlignmentSize - 1));
-}
-#endif
-
-TEST(DecoderBufferTest, ReadingWriting) {
- const char kData[] = "hello";
- const int kDataSize = arraysize(kData);
-
- scoped_refptr<DecoderBuffer> buffer(new DecoderBuffer(kDataSize));
- ASSERT_TRUE(buffer);
-
- uint8* data = buffer->GetWritableData();
- ASSERT_TRUE(data);
- ASSERT_EQ(kDataSize, buffer->GetDataSize());
- memcpy(data, kData, kDataSize);
- const uint8* read_only_data = buffer->GetData();
- ASSERT_EQ(data, read_only_data);
- ASSERT_EQ(0, memcmp(read_only_data, kData, kDataSize));
- EXPECT_FALSE(buffer->IsEndOfStream());
-}
-
-TEST(DecoderBufferTest, GetDecryptConfig) {
- scoped_refptr<DecoderBuffer> buffer(new DecoderBuffer(0));
- EXPECT_FALSE(buffer->GetDecryptConfig());
-}
-
-} // namespace media
diff --git a/src/media/base/decrypt_config.cc b/src/media/base/decrypt_config.cc
deleted file mode 100644
index 0b730ab..0000000
--- a/src/media/base/decrypt_config.cc
+++ /dev/null
@@ -1,44 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "media/base/decrypt_config.h"
-
-#include "base/logging.h"
-
-namespace media {
-
-#if defined(__LB_SHELL__) || defined(COBALT)
-
-DecryptConfig::DecryptConfig(const std::string& key_id,
- const std::string& iv,
- const std::vector<SubsampleEntry>& subsamples)
- : key_id_(key_id),
- iv_(iv),
- subsamples_(subsamples) {
- CHECK_GT(key_id.size(), 0u);
- CHECK(iv.size() == static_cast<size_t>(DecryptConfig::kDecryptionKeySize) ||
- iv.empty());
-}
-
-#else // defined(__LB_SHELL__) || defined(COBALT)
-
-DecryptConfig::DecryptConfig(const std::string& key_id,
- const std::string& iv,
- const int data_offset,
- const std::vector<SubsampleEntry>& subsamples)
- : key_id_(key_id),
- iv_(iv),
- data_offset_(data_offset),
- subsamples_(subsamples) {
- CHECK_GT(key_id.size(), 0u);
- CHECK(iv.size() == static_cast<size_t>(DecryptConfig::kDecryptionKeySize) ||
- iv.empty());
- CHECK_GE(data_offset, 0);
-}
-
-#endif // defined(__LB_SHELL__) || defined(COBALT)
-
-DecryptConfig::~DecryptConfig() {}
-
-} // namespace media
diff --git a/src/media/base/decrypt_config.h b/src/media/base/decrypt_config.h
deleted file mode 100644
index 6dbc4e3..0000000
--- a/src/media/base/decrypt_config.h
+++ /dev/null
@@ -1,86 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef MEDIA_BASE_DECRYPT_CONFIG_H_
-#define MEDIA_BASE_DECRYPT_CONFIG_H_
-
-#include <string>
-#include <vector>
-
-#include "base/basictypes.h"
-#include "base/memory/scoped_ptr.h"
-#include "media/base/media_export.h"
-
-namespace media {
-
-// The Common Encryption spec provides for subsample encryption, where portions
-// of a sample are set in cleartext. A SubsampleEntry specifies the number of
-// clear and encrypted bytes in each subsample. For decryption, all of the
-// encrypted bytes in a sample should be considered a single logical stream,
-// regardless of how they are divided into subsamples, and the clear bytes
-// should not be considered as part of decryption. This is logically equivalent
-// to concatenating all 'cypher_bytes' portions of subsamples, decrypting that
-// result, and then copying each byte from the decrypted block over the
-// position of the corresponding encrypted byte.
-struct SubsampleEntry {
- uint32 clear_bytes;
- uint32 cypher_bytes;
-};
-
-// Contains all information that a decryptor needs to decrypt a media sample.
-class MEDIA_EXPORT DecryptConfig {
- public:
- // Keys are always 128 bits.
- static const int kDecryptionKeySize = 16;
-
- // |key_id| is the ID that references the decryption key for this sample.
- // |iv| is the initialization vector defined by the encrypted format.
-  // Currently |iv| must be 16 bytes, as defined by WebM and ISO, or empty,
-  // which signals an unencrypted frame.
- // |data_offset| is the amount of data that should be discarded from the
- // head of the sample buffer before applying subsample information. A
- // decrypted buffer will be shorter than an encrypted buffer by this amount.
- // |subsamples| defines the clear and encrypted portions of the sample as
- // described above. A decrypted buffer will be equal in size to the sum
- // of the subsample sizes.
- //
- // |data_offset| is applied before |subsamples|.
- DecryptConfig(const std::string& key_id,
- const std::string& iv,
-#if !defined(__LB_SHELL__) && !defined(COBALT)
- const int data_offset,
-#endif // !defined(__LB_SHELL__) && !defined(COBALT)
- const std::vector<SubsampleEntry>& subsamples);
- ~DecryptConfig();
-
- const std::string& key_id() const { return key_id_; }
- const std::string& iv() const { return iv_; }
-#if !defined(__LB_SHELL__) && !defined(COBALT)
- int data_offset() const { return data_offset_; }
-#endif // !defined(__LB_SHELL__) && !defined(COBALT)
- const std::vector<SubsampleEntry>& subsamples() const { return subsamples_; }
-
- private:
- const std::string key_id_;
-
- // Initialization vector.
- const std::string iv_;
-
-#if !defined(__LB_SHELL__) && !defined(COBALT)
- // TODO(fgalligan): Remove |data_offset_| if there is no plan to use it in
- // the future.
- // Amount of data to be discarded before applying subsample information.
- const int data_offset_;
-#endif // !defined(__LB_SHELL__) && !defined(COBALT)
-
- // Subsample information. May be empty for some formats, meaning entire frame
- // (less data ignored by data_offset_) is encrypted.
- const std::vector<SubsampleEntry> subsamples_;
-
- DISALLOW_COPY_AND_ASSIGN(DecryptConfig);
-};
-
-} // namespace media
-
-#endif // MEDIA_BASE_DECRYPT_CONFIG_H_
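The subsample layout described in the deleted header above is easiest to see in code. The following is a rough, self-contained sketch (not taken from the imported sources; DecryptInPlace() is a placeholder XOR cipher standing in for whatever the key system provides): the cypher_bytes ranges are gathered into one logical stream, decrypted once, and scattered back over their original positions.

#include <stddef.h>
#include <stdint.h>
#include <string>
#include <vector>

// Mirrors media::SubsampleEntry from the header above.
struct SubsampleEntry {
  uint32_t clear_bytes;
  uint32_t cypher_bytes;
};

// Placeholder "cipher"; a real implementation would run the key system's
// cipher (e.g. AES-CTR) over |data| instead of XORing.
static void DecryptInPlace(std::string* data) {
  for (size_t i = 0; i < data->size(); ++i)
    (*data)[i] ^= 0xFF;
}

// Gathers the encrypted ranges into one contiguous buffer, decrypts it as a
// single stream, then scatters the plaintext back over the encrypted bytes.
static std::string DecryptSubsampled(
    const std::string& sample, const std::vector<SubsampleEntry>& subsamples) {
  std::string encrypted;
  size_t pos = 0;
  for (size_t i = 0; i < subsamples.size(); ++i) {
    pos += subsamples[i].clear_bytes;
    encrypted.append(sample, pos, subsamples[i].cypher_bytes);
    pos += subsamples[i].cypher_bytes;
  }
  DecryptInPlace(&encrypted);

  std::string output = sample;
  pos = 0;
  size_t src = 0;
  for (size_t i = 0; i < subsamples.size(); ++i) {
    pos += subsamples[i].clear_bytes;
    output.replace(pos, subsamples[i].cypher_bytes, encrypted, src,
                   subsamples[i].cypher_bytes);
    pos += subsamples[i].cypher_bytes;
    src += subsamples[i].cypher_bytes;
  }
  return output;
}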
diff --git a/src/media/base/decryptor.cc b/src/media/base/decryptor.cc
deleted file mode 100644
index e9b232d..0000000
--- a/src/media/base/decryptor.cc
+++ /dev/null
@@ -1,13 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "media/base/decryptor.h"
-
-namespace media {
-
-Decryptor::Decryptor() {}
-
-Decryptor::~Decryptor() {}
-
-} // namespace media
diff --git a/src/media/base/decryptor.h b/src/media/base/decryptor.h
deleted file mode 100644
index fd51c96..0000000
--- a/src/media/base/decryptor.h
+++ /dev/null
@@ -1,221 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef MEDIA_BASE_DECRYPTOR_H_
-#define MEDIA_BASE_DECRYPTOR_H_
-
-#include <list>
-#include <string>
-
-#include "base/basictypes.h"
-#include "base/callback.h"
-#include "base/memory/ref_counted.h"
-#include "media/base/audio_decoder.h"
-#include "media/base/media_export.h"
-
-namespace media {
-
-class AudioDecoderConfig;
-class Buffer;
-class DecoderBuffer;
-class VideoDecoderConfig;
-class VideoFrame;
-
-// Performs key operations and decrypts (and decodes) encrypted buffer.
-//
-// Key operations (GenerateKeyRequest(), AddKey() and CancelKeyRequest())
-// are called on the renderer thread. Therefore, these calls should be fast
-// and nonblocking; key events should be fired asynchronously.
-// All other methods are called on the (video/audio) decoder thread.
-// Decryptor implementations must be thread safe when methods are called
-// following the above model.
-// Depending on the implementation callbacks may be fired synchronously or
-// asynchronously.
-class MEDIA_EXPORT Decryptor {
- public:
- // Reported to UMA, so never reuse a value!
- // Must be kept in sync with WebKit::WebMediaPlayerClient::MediaKeyErrorCode
- // (enforced in webmediaplayer_impl.cc).
- enum KeyError {
- kUnknownError = 1,
- kClientError,
- kServiceError,
- kOutputError,
- kHardwareChangeError,
- kDomainError,
- kMaxKeyError // Must be last and greater than any legit value.
- };
-
- // TODO(xhwang): Replace kError with kDecryptError and kDecodeError.
- // TODO(xhwang): Replace kNeedMoreData with kNotEnoughData.
- enum Status {
- kSuccess, // Decryption successfully completed. Decrypted buffer ready.
- kNoKey, // No key is available to decrypt.
- kNeedMoreData, // Decoder needs more data to produce a frame.
- kError // Key is available but an error occurred during decryption.
- };
-
- // TODO(xhwang): Unify this with DemuxerStream::Type.
- enum StreamType {
- kAudio,
- kVideo
- };
-
- Decryptor();
- virtual ~Decryptor();
-
- // Generates a key request for the |key_system| with |type| and
- // |init_data| provided.
- // Returns true if generating key request succeeded, false otherwise.
- // Note: AddKey() and CancelKeyRequest() should only be called after
- // GenerateKeyRequest() returns true.
- virtual bool GenerateKeyRequest(const std::string& key_system,
- const std::string& type,
- const uint8* init_data,
- int init_data_length) = 0;
-
- // Adds a |key| to the |key_system|. The |key| is not limited to a decryption
- // key. It can be any data that the key system accepts, such as a license.
- // If multiple calls of this function set different keys for the same
- // key ID, the older key will be replaced by the newer key.
- virtual void AddKey(const std::string& key_system,
- const uint8* key,
- int key_length,
- const uint8* init_data,
- int init_data_length,
- const std::string& session_id) = 0;
-
- // Cancels the key request specified by |session_id|.
- virtual void CancelKeyRequest(const std::string& key_system,
- const std::string& session_id) = 0;
-
- // Indicates that a key has been added to the Decryptor.
- typedef base::Callback<void()> KeyAddedCB;
-
- // Registers a KeyAddedCB which should be called when a key is added to the
- // decryptor. Only one KeyAddedCB can be registered for one |stream_type|.
- // If this function is called multiple times for the same |stream_type|, the
- // previously registered callback will be replaced. In other words,
- // registering a null callback cancels the originally registered callback.
- virtual void RegisterKeyAddedCB(StreamType stream_type,
- const KeyAddedCB& key_added_cb) = 0;
-
- // Indicates completion of a decryption operation.
- //
- // First parameter: The status of the decryption operation.
- // - Set to kSuccess if the encrypted buffer is successfully decrypted and
- // the decrypted buffer is ready to be read.
- // - Set to kNoKey if no decryption key is available to decrypt the encrypted
- // buffer. In this case the decrypted buffer must be NULL.
- // - Set to kError if unexpected error has occurred. In this case the
- // decrypted buffer must be NULL.
- // - This parameter should not be set to kNeedMoreData.
- // Second parameter: The decrypted buffer.
- typedef base::Callback<void(Status,
- const scoped_refptr<DecoderBuffer>&)> DecryptCB;
-
- // Decrypts the |encrypted| buffer. The decrypt status and decrypted buffer
- // are returned via the provided callback |decrypt_cb|. The |encrypted| buffer
- // must not be NULL.
- // Decrypt() should not be called until any previous DecryptCB of the same
- // |stream_type| has completed. Thus, only one DecryptCB may be pending at
- // a time for a given |stream_type|.
- virtual void Decrypt(StreamType stream_type,
- const scoped_refptr<DecoderBuffer>& encrypted,
- const DecryptCB& decrypt_cb) = 0;
-
- // Cancels the scheduled decryption operation for |stream_type| and fires the
- // pending DecryptCB immediately with kSuccess and NULL.
- // Decrypt() should not be called again before the pending DecryptCB for the
- // same |stream_type| is fired.
- virtual void CancelDecrypt(StreamType stream_type) = 0;
-
- // Indicates completion of audio/video decoder initialization.
- //
- // First Parameter: Indicates initialization success.
- // - Set to true if initialization was successful. False if an error occurred.
- typedef base::Callback<void(bool)> DecoderInitCB;
-
- // Initializes a decoder with the given |config|, executing the |init_cb|
- // upon completion.
- virtual void InitializeAudioDecoder(scoped_ptr<AudioDecoderConfig> config,
- const DecoderInitCB& init_cb) = 0;
- virtual void InitializeVideoDecoder(scoped_ptr<VideoDecoderConfig> config,
- const DecoderInitCB& init_cb) = 0;
-
- // Helper structure for managing multiple decoded audio buffers per input.
- // TODO(xhwang): Rename this to AudioFrames.
- typedef std::list<scoped_refptr<Buffer> > AudioBuffers;
-
- // Indicates completion of audio/video decrypt-and-decode operation.
- //
- // First parameter: The status of the decrypt-and-decode operation.
- // - Set to kSuccess if the encrypted buffer is successfully decrypted and
- // decoded. In this case, the decoded frame/buffers can be/contain:
- // 1) NULL, which means the operation has been aborted.
- // 2) End-of-stream (EOS) frame, which means that the decoder has hit EOS,
- // flushed all internal buffers and cannot produce more video frames.
- // 3) Decrypted and decoded video frame or audio buffer.
- // - Set to kNoKey if no decryption key is available to decrypt the encrypted
- // buffer. In this case the returned frame(s) must be NULL/empty.
- // - Set to kNeedMoreData if more data is needed to produce a video frame. In
- // this case the returned frame(s) must be NULL/empty.
- // - Set to kError if unexpected error has occurred. In this case the
- // returned frame(s) must be NULL/empty.
- // Second parameter: The decoded video frame or audio buffers.
- typedef base::Callback<void(Status, const AudioBuffers&)> AudioDecodeCB;
- typedef base::Callback<void(Status,
- const scoped_refptr<VideoFrame>&)> VideoDecodeCB;
-
- // Decrypts and decodes the |encrypted| buffer. The status and the decrypted
- // buffer are returned via the provided callback.
- // The |encrypted| buffer must not be NULL.
- // At end-of-stream, this method should be called repeatedly with
- // end-of-stream DecoderBuffer until no frame/buffer can be produced.
- // These methods can only be called after the corresponding decoder has
- // been successfully initialized.
- virtual void DecryptAndDecodeAudio(
- const scoped_refptr<DecoderBuffer>& encrypted,
- const AudioDecodeCB& audio_decode_cb) = 0;
- virtual void DecryptAndDecodeVideo(
- const scoped_refptr<DecoderBuffer>& encrypted,
- const VideoDecodeCB& video_decode_cb) = 0;
-
- // Resets the decoder to an initialized clean state, cancels any scheduled
- // decrypt-and-decode operations, and fires any pending
- // AudioDecodeCB/VideoDecodeCB immediately with kError and NULL.
- // This method can only be called after the corresponding decoder has been
- // successfully initialized.
- virtual void ResetDecoder(StreamType stream_type) = 0;
-
- // Releases decoder resources, deinitializes the decoder, cancels any
- // scheduled initialization or decrypt-and-decode operations, and fires
- // any pending DecoderInitCB/AudioDecodeCB/VideoDecodeCB immediately.
- // DecoderInitCB should be fired with false. AudioDecodeCB/VideoDecodeCB
- // should be fired with kError.
- // This method can be called any time after Initialize{Audio|Video}Decoder()
- // has been called (with the correct stream type).
- // After this operation, the decoder is set to an uninitialized state.
- // The decoder can be reinitialized after it is uninitialized.
- virtual void DeinitializeDecoder(StreamType stream_type) = 0;
-
- private:
- DISALLOW_COPY_AND_ASSIGN(Decryptor);
-};
-
-// Callback to notify that a decryptor is ready.
-typedef base::Callback<void(Decryptor*)> DecryptorReadyCB;
-
-// Callback to set/cancel a DecryptorReadyCB.
-// Calling this callback with a non-null callback registers decryptor ready
-// notification. When the decryptor is ready, notification will be sent
-// through the provided callback.
-// Calling this callback with a null callback cancels previously registered
-// decryptor ready notification. Any previously provided callback will be
-// fired immediately with NULL.
-typedef base::Callback<void(const DecryptorReadyCB&)> SetDecryptorReadyCB;
-
-} // namespace media
-
-#endif // MEDIA_BASE_DECRYPTOR_H_
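To make the Decrypt()/RegisterKeyAddedCB contract above concrete, here is a hypothetical caller sketch (not from the imported sources; the class and method names are illustrative). It keeps at most one DecryptCB pending per stream type and retries a kNoKey result once the key-added callback fires.

#include "base/bind.h"
#include "base/memory/ref_counted.h"
#include "media/base/decoder_buffer.h"
#include "media/base/decryptor.h"

namespace media {

class VideoDecryptHelper {
 public:
  explicit VideoDecryptHelper(Decryptor* decryptor) : decryptor_(decryptor) {
    // Only one KeyAddedCB may be registered per stream type; it is used here
    // to retry a decrypt that previously failed with kNoKey.
    decryptor_->RegisterKeyAddedCB(
        Decryptor::kVideo,
        base::Bind(&VideoDecryptHelper::OnKeyAdded, base::Unretained(this)));
  }

  void Decrypt(const scoped_refptr<DecoderBuffer>& encrypted) {
    encrypted_ = encrypted;
    // Only one DecryptCB may be pending per stream type at a time.
    decryptor_->Decrypt(
        Decryptor::kVideo, encrypted_,
        base::Bind(&VideoDecryptHelper::OnDecrypted, base::Unretained(this)));
  }

 private:
  void OnDecrypted(Decryptor::Status status,
                   const scoped_refptr<DecoderBuffer>& decrypted) {
    if (status == Decryptor::kSuccess) {
      // Hand |decrypted| to the decoder here.
    } else if (status == Decryptor::kNoKey) {
      // Keep |encrypted_| and wait for OnKeyAdded() before retrying.
    }
  }

  void OnKeyAdded() { Decrypt(encrypted_); }

  Decryptor* decryptor_;
  scoped_refptr<DecoderBuffer> encrypted_;
};

}  // namespace media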
diff --git a/src/media/base/decryptor_client.h b/src/media/base/decryptor_client.h
deleted file mode 100644
index 14d09a9..0000000
--- a/src/media/base/decryptor_client.h
+++ /dev/null
@@ -1,52 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef MEDIA_BASE_DECRYPTOR_CLIENT_H_
-#define MEDIA_BASE_DECRYPTOR_CLIENT_H_
-
-#include <string>
-
-#include "base/memory/scoped_ptr.h"
-#include "media/base/decryptor.h"
-
-namespace media {
-
-// Interface used by a decryptor to fire key events.
-// See: http://dvcs.w3.org/hg/html-media/raw-file/tip/encrypted-media/encrypted-media.html#event-summary
-class DecryptorClient {
- public:
- // Signals that a key has been added.
- virtual void KeyAdded(const std::string& key_system,
- const std::string& session_id) = 0;
-
- // Signals that a key error occurred. The |system_code| is key
- // system-dependent. For clear key system, the |system_code| is always zero.
- virtual void KeyError(const std::string& key_system,
- const std::string& session_id,
- Decryptor::KeyError error_code,
- int system_code) = 0;
-
- // Signals that a key message has been generated.
- virtual void KeyMessage(const std::string& key_system,
- const std::string& session_id,
- const std::string& message,
- const std::string& default_url) = 0;
-
- // Signals that a key is needed for decryption. |key_system| and |session_id|
- // can be empty if the key system has not been selected.
- // TODO(xhwang): Figure out if "type" is optional for NeedKey fired from the
- // decoder.
- virtual void NeedKey(const std::string& key_system,
- const std::string& session_id,
- const std::string& type,
- scoped_array<uint8> init_data,
- int init_data_length) = 0;
-
- protected:
- virtual ~DecryptorClient() {}
-};
-
-} // namespace media
-
-#endif // MEDIA_BASE_DECRYPTOR_CLIENT_H_
diff --git a/src/media/base/demuxer.cc b/src/media/base/demuxer.cc
deleted file mode 100644
index 6cd4e29..0000000
--- a/src/media/base/demuxer.cc
+++ /dev/null
@@ -1,31 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "media/base/demuxer.h"
-
-#include "base/logging.h"
-
-namespace media {
-
-DemuxerHost::~DemuxerHost() {}
-
-Demuxer::Demuxer() {}
-
-Demuxer::~Demuxer() {}
-
-void Demuxer::SetPlaybackRate(float playback_rate) {}
-
-void Demuxer::Seek(base::TimeDelta time, const PipelineStatusCB& status_cb) {
- DCHECK(!status_cb.is_null());
- status_cb.Run(PIPELINE_OK);
-}
-
-void Demuxer::Stop(const base::Closure& callback) {
- DCHECK(!callback.is_null());
- callback.Run();
-}
-
-void Demuxer::OnAudioRendererDisabled() {}
-
-} // namespace media
diff --git a/src/media/base/demuxer.h b/src/media/base/demuxer.h
deleted file mode 100644
index 34150af..0000000
--- a/src/media/base/demuxer.h
+++ /dev/null
@@ -1,77 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef MEDIA_BASE_DEMUXER_H_
-#define MEDIA_BASE_DEMUXER_H_
-
-#include "base/memory/ref_counted.h"
-#include "base/time.h"
-#include "media/base/data_source.h"
-#include "media/base/demuxer_stream.h"
-#include "media/base/media_export.h"
-#include "media/base/pipeline_status.h"
-
-namespace media {
-
-class MEDIA_EXPORT DemuxerHost : public DataSourceHost {
- public:
- // Sets the duration of the media in microseconds.
- // Duration may be kInfiniteDuration() if the duration is not known.
- virtual void SetDuration(base::TimeDelta duration) = 0;
-
- // Stops execution of the pipeline due to a fatal error. Do not call this
- // method with PIPELINE_OK.
- virtual void OnDemuxerError(PipelineStatus error) = 0;
-
- protected:
- virtual ~DemuxerHost();
-};
-
-class MEDIA_EXPORT Demuxer : public base::RefCountedThreadSafe<Demuxer> {
- public:
- Demuxer();
-
- // Completes initialization of the demuxer.
- //
- // The demuxer does not own |host| as it is guaranteed to outlive the
- // lifetime of the demuxer. Don't delete it!
- virtual void Initialize(DemuxerHost* host,
- const PipelineStatusCB& status_cb) = 0;
-
- // The pipeline playback rate has been changed. Demuxers may implement this
- // method if they need to respond to this call.
- virtual void SetPlaybackRate(float playback_rate);
-
- // Carry out any actions required to seek to the given time, executing the
- // callback upon completion.
- virtual void Seek(base::TimeDelta time, const PipelineStatusCB& status_cb);
-
- // The pipeline is being stopped either as a result of an error or because
- // the client called Stop().
- virtual void Stop(const base::Closure& callback);
-
- // This method is called from the pipeline when the audio renderer
- // is disabled. Demuxers can ignore the notification if they do not
- // need to react to this event.
- //
- // TODO(acolwell): Change to generic DisableStream(DemuxerStream::Type).
- virtual void OnAudioRendererDisabled();
-
- // Returns the given stream type, or NULL if that type is not present.
- virtual scoped_refptr<DemuxerStream> GetStream(DemuxerStream::Type type) = 0;
-
- // Returns the starting time for the media file.
- virtual base::TimeDelta GetStartTime() const = 0;
-
- protected:
- friend class base::RefCountedThreadSafe<Demuxer>;
- virtual ~Demuxer();
-
- private:
- DISALLOW_COPY_AND_ASSIGN(Demuxer);
-};
-
-} // namespace media
-
-#endif // MEDIA_BASE_DEMUXER_H_
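A minimal sketch of how a pipeline might drive this interface (illustrative only; the free functions below are not from the imported sources): Initialize() completes through the PipelineStatusCB, after which the individual streams can be fetched.

#include "base/bind.h"
#include "base/memory/ref_counted.h"
#include "media/base/demuxer.h"

namespace media {

static void OnDemuxerInitialized(const scoped_refptr<Demuxer>& demuxer,
                                 PipelineStatus status) {
  if (status != PIPELINE_OK)
    return;  // Abort startup; the demuxer failed to initialize.
  // Either stream may be NULL if the container lacks that track.
  scoped_refptr<DemuxerStream> audio = demuxer->GetStream(DemuxerStream::AUDIO);
  scoped_refptr<DemuxerStream> video = demuxer->GetStream(DemuxerStream::VIDEO);
}

static void StartDemuxer(const scoped_refptr<Demuxer>& demuxer,
                         DemuxerHost* host) {
  // |host| must outlive the demuxer, per the Initialize() contract.
  demuxer->Initialize(host, base::Bind(&OnDemuxerInitialized, demuxer));
}

}  // namespace media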
diff --git a/src/media/base/demuxer_stream.cc b/src/media/base/demuxer_stream.cc
deleted file mode 100644
index daede65..0000000
--- a/src/media/base/demuxer_stream.cc
+++ /dev/null
@@ -1,11 +0,0 @@
-// Copyright (c) 2011 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "media/base/demuxer_stream.h"
-
-namespace media {
-
-DemuxerStream::~DemuxerStream() {}
-
-} // namespace media
diff --git a/src/media/base/demuxer_stream.h b/src/media/base/demuxer_stream.h
deleted file mode 100644
index d9a16ea..0000000
--- a/src/media/base/demuxer_stream.h
+++ /dev/null
@@ -1,89 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef MEDIA_BASE_DEMUXER_STREAM_H_
-#define MEDIA_BASE_DEMUXER_STREAM_H_
-
-#include "base/callback.h"
-#include "base/memory/ref_counted.h"
-#include "media/base/media_export.h"
-#include "media/base/ranges.h"
-
-namespace media {
-
-class AudioDecoderConfig;
-#if defined(__LB_SHELL__) || defined(COBALT)
-class Decryptor;
-#endif
-class DecoderBuffer;
-class VideoDecoderConfig;
-
-class MEDIA_EXPORT DemuxerStream
- : public base::RefCountedThreadSafe<DemuxerStream> {
- public:
- enum Type {
- UNKNOWN,
- AUDIO,
- VIDEO,
- NUM_TYPES, // Always keep this entry as the last one!
- };
-
- // Status returned in the Read() callback.
- // kOk : Indicates the second parameter is Non-NULL and contains media data
- // or the end of the stream.
- // kAborted : Indicates an aborted Read(). This can happen if the
- // DemuxerStream gets flushed and doesn't have any more data to
- // return. The second parameter MUST be NULL when this status is
- // returned.
-  // kConfigChanged : Indicates that the AudioDecoderConfig or
- // VideoDecoderConfig for the stream has changed.
- // The DemuxerStream expects an audio_decoder_config() or
- // video_decoder_config() call before Read() will start
- // returning DecoderBuffers again. The decoder will need this
- // new configuration to properly decode the buffers read
- // from this point forward. The second parameter MUST be NULL
- // when this status is returned.
- enum Status {
- kOk,
- kAborted,
- kConfigChanged,
- };
-
-  // Request a buffer to be returned via the provided callback.
- //
- // The first parameter indicates the status of the read.
- // The second parameter is non-NULL and contains media data
- // or the end of the stream if the first parameter is kOk. NULL otherwise.
- typedef base::Callback<void(Status,
- const scoped_refptr<DecoderBuffer>&)>ReadCB;
- virtual void Read(const ReadCB& read_cb) = 0;
-
- // Returns the audio decoder configuration. It is an error to call this method
- // if type() != AUDIO.
- virtual const AudioDecoderConfig& audio_decoder_config() = 0;
-
- // Returns the video decoder configuration. It is an error to call this method
- // if type() != VIDEO.
- virtual const VideoDecoderConfig& video_decoder_config() = 0;
-
- // Returns the type of stream.
- virtual Type type() = 0;
-
- virtual void EnableBitstreamConverter() = 0;
-
-#if defined(__LB_SHELL__) || defined(COBALT)
- // Returns true if the content was encrypted at some point
- virtual bool StreamWasEncrypted() const = 0;
-
- virtual Decryptor* GetDecryptor() const { return NULL; }
-#endif
-
- protected:
- friend class base::RefCountedThreadSafe<DemuxerStream>;
- virtual ~DemuxerStream();
-};
-
-} // namespace media
-
-#endif // MEDIA_BASE_DEMUXER_STREAM_H_
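The three Read() statuses above imply a small state machine in the caller. A hypothetical reader (not from the imported sources) might handle them like this:

#include "base/bind.h"
#include "base/memory/ref_counted.h"
#include "media/base/decoder_buffer.h"
#include "media/base/demuxer_stream.h"

namespace media {

static void ReadOneBuffer(const scoped_refptr<DemuxerStream>& stream);

static void OnBufferRead(const scoped_refptr<DemuxerStream>& stream,
                         DemuxerStream::Status status,
                         const scoped_refptr<DecoderBuffer>& buffer) {
  switch (status) {
    case DemuxerStream::kOk:
      if (buffer->IsEndOfStream()) {
        // Flush the decoder; no further reads are needed.
      } else {
        // Feed |buffer| to the decoder, then issue the next read.
        ReadOneBuffer(stream);
      }
      break;
    case DemuxerStream::kAborted:
      // |buffer| is NULL; the stream was flushed. Stop reading for now.
      break;
    case DemuxerStream::kConfigChanged:
      // Pick up the new configuration before reading again.
      if (stream->type() == DemuxerStream::VIDEO)
        stream->video_decoder_config();
      else
        stream->audio_decoder_config();
      ReadOneBuffer(stream);
      break;
  }
}

static void ReadOneBuffer(const scoped_refptr<DemuxerStream>& stream) {
  stream->Read(base::Bind(&OnBufferRead, stream));
}

}  // namespace media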
diff --git a/src/media/base/djb2.cc b/src/media/base/djb2.cc
deleted file mode 100644
index 8d47ed2..0000000
--- a/src/media/base/djb2.cc
+++ /dev/null
@@ -1,14 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "media/base/djb2.h"
-
-uint32 DJB2Hash(const void* buf, size_t len, uint32 seed) {
- const uint8* src = reinterpret_cast<const uint8*>(buf);
- uint32 hash = seed;
- for (size_t i = 0; i < len; ++i) {
- hash = hash * 33 + src[i];
- }
- return hash;
-}
diff --git a/src/media/base/djb2.h b/src/media/base/djb2.h
deleted file mode 100644
index 598f9d1..0000000
--- a/src/media/base/djb2.h
+++ /dev/null
@@ -1,41 +0,0 @@
-// Copyright (c) 2011 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef MEDIA_BASE_DJB2_H_
-#define MEDIA_BASE_DJB2_H_
-
-#include "base/basictypes.h"
-#include "media/base/media_export.h"
-
-// DJB2 is a hash algorithm with excellent distribution and speed
-// on many different sets.
-// It has marginally more collisions than FNV1, but makes up for it in
-// performance.
-// The return value is suitable for table lookups.
-// For small fixed sizes (i.e. a pixel), it has low overhead and inlines well.
-// For large data sets, it optimizes into assembly/simd and is appropriate
-// for realtime applications.
-// See Also:
-// http://www.cse.yorku.ca/~oz/hash.html
-
-static const uint32 kDJB2HashSeed = 5381u;
-
-// These functions perform DJB2 hash. The simplest call is DJB2Hash() to
-// generate the DJB2 hash of the given data:
-// uint32 hash = DJB2Hash(data1, length1, kDJB2HashSeed);
-//
-// You can also compute the DJB2 hash of data incrementally by making multiple
-// calls to DJB2Hash():
-// uint32 hash_value = kDJB2HashSeed; // Initial seed for DJB2.
-// for (size_t i = 0; i < copy_lines; ++i) {
-// hash_value = DJB2Hash(source, bytes_per_line, hash_value);
-// source += source_stride;
-// }
-
-// For the given buffer of data, compute the DJB2 hash of
-// the data. You can call this any number of times during the computation.
-MEDIA_EXPORT uint32 DJB2Hash(const void* buf, size_t len, uint32 seed);
-
-#endif // MEDIA_BASE_DJB2_H_
-
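A short usage sketch of the incremental form described in the comments above (data values are arbitrary); hashing the two chunks in sequence equals hashing the concatenated bytes in a single call.

#include "media/base/djb2.h"

static uint32 HashTwoChunks() {
  const uint8 chunk1[] = {1, 2, 3};
  const uint8 chunk2[] = {4, 5};
  uint32 hash = kDJB2HashSeed;
  hash = DJB2Hash(chunk1, sizeof(chunk1), hash);  // Hash the first chunk.
  hash = DJB2Hash(chunk2, sizeof(chunk2), hash);  // Continue from the result.
  // Equal to DJB2Hash() over the five bytes {1, 2, 3, 4, 5} with kDJB2HashSeed.
  return hash;
}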
diff --git a/src/media/base/djb2_unittest.cc b/src/media/base/djb2_unittest.cc
deleted file mode 100644
index f7898aa..0000000
--- a/src/media/base/djb2_unittest.cc
+++ /dev/null
@@ -1,15 +0,0 @@
-// Copyright (c) 2008 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "media/base/djb2.h"
-
-#include "testing/gtest/include/gtest/gtest.h"
-
-uint8 kTestData[] = { 1, 2, 3 };
-
-TEST(DJB2HashTest, HashTest) {
- EXPECT_EQ(DJB2Hash(NULL, 0, 0u), 0u);
- EXPECT_EQ(DJB2Hash(kTestData, sizeof(kTestData), 5381u),
- ((5381u * 33u + 1u) * 33u + 2u) * 33u + 3u);
-}
diff --git a/src/media/base/endian_util.h b/src/media/base/endian_util.h
deleted file mode 100644
index b9d6c02..0000000
--- a/src/media/base/endian_util.h
+++ /dev/null
@@ -1,139 +0,0 @@
-// Copyright 2016 Google Inc. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef MEDIA_BASE_ENDIAN_UTIL_H_
-#define MEDIA_BASE_ENDIAN_UTIL_H_
-
-#include "base/sys_byteorder.h"
-
-namespace media {
-namespace endian_util {
-
-// The following functions must be able to support storing to/loading from
-// non-aligned memory. Thus, casts like "*reinterpret_cast<uint16_t*>(p)"
-// should be avoided as these can cause crashes due to alignment on some
-// platforms.
-
-// Load 2 little-endian bytes at |p| and return as a host-endian uint16_t.
-inline uint16_t load_uint16_little_endian(const uint8_t* p) {
- uint16_t aligned_p;
- memcpy(&aligned_p, p, sizeof(aligned_p));
- return base::ByteSwapToLE16(aligned_p);
-}
-
-// Load 4 little-endian bytes at |p| and return as a host-endian uint32_t.
-inline uint32_t load_uint32_little_endian(const uint8_t* p) {
- uint32_t aligned_p;
- memcpy(&aligned_p, p, sizeof(aligned_p));
- return base::ByteSwapToLE32(aligned_p);
-}
-
-// Load 8 little-endian bytes at |p| and return as a host-endian uint64_t.
-inline uint64_t load_uint64_little_endian(const uint8_t* p) {
- uint64_t aligned_p;
- memcpy(&aligned_p, p, sizeof(aligned_p));
- return base::ByteSwapToLE64(aligned_p);
-}
-
-// Load 2 big-endian bytes at |p| and return as a host-endian uint16_t.
-inline uint16_t load_uint16_big_endian(const uint8_t* p) {
- uint16_t aligned_p;
- memcpy(&aligned_p, p, sizeof(aligned_p));
- return base::NetToHost16(aligned_p);
-}
-
-// Load 4 big-endian bytes at |p| and return as a host-endian uint32_t.
-inline uint32_t load_uint32_big_endian(const uint8_t* p) {
- uint32_t aligned_p;
- memcpy(&aligned_p, p, sizeof(aligned_p));
- return base::NetToHost32(aligned_p);
-}
-
-// Load 8 big-endian bytes at |p| and return as a host-endian uint64_t.
-inline uint64_t load_uint64_big_endian(const uint8_t* p) {
- uint64_t aligned_p;
- memcpy(&aligned_p, p, sizeof(aligned_p));
- return base::NetToHost64(aligned_p);
-}
-
-// Load 2 big-endian bytes at |p| and return as a host-endian int16_t.
-inline int16_t load_int16_big_endian(const uint8_t* p) {
- return static_cast<int16_t>(load_uint16_big_endian(p));
-}
-
-// Load 4 big-endian bytes at |p| and return as a host-endian int32_t.
-inline int32_t load_int32_big_endian(const uint8_t* p) {
- return static_cast<int32_t>(load_uint32_big_endian(p));
-}
-
-// Load 8 big-endian bytes at |p| and return as a host-endian int64_t.
-inline int64_t load_int64_big_endian(const uint8_t* p) {
- return static_cast<int64_t>(load_uint64_big_endian(p));
-}
-
-// Load 2 little-endian bytes at |p| and return as a host-endian int16_t.
-inline int16_t load_int16_little_endian(const uint8_t* p) {
- return static_cast<int16_t>(load_uint16_little_endian(p));
-}
-
-// Load 4 little-endian bytes at |p| and return as a host-endian int32_t.
-inline int32_t load_int32_little_endian(const uint8_t* p) {
- return static_cast<int32_t>(load_uint32_little_endian(p));
-}
-
-// Load 8 little-endian bytes at |p| and return as a host-endian int64_t.
-inline int64_t load_int64_little_endian(const uint8_t* p) {
- return static_cast<int64_t>(load_uint64_little_endian(p));
-}
-
-// Store 2 host-endian bytes as big-endian at |p|.
-inline void store_uint16_big_endian(uint16_t d, uint8_t* p) {
- uint16_t big_d = base::HostToNet16(d);
- memcpy(p, &big_d, sizeof(big_d));
-}
-
-// Store 4 host-endian bytes as big-endian at |p|.
-inline void store_uint32_big_endian(uint32_t d, uint8_t* p) {
- uint32_t big_d = base::HostToNet32(d);
- memcpy(p, &big_d, sizeof(big_d));
-}
-
-// Store 8 host-endian bytes as big-endian at |p|.
-inline void store_uint64_big_endian(uint64_t d, uint8_t* p) {
- uint64_t big_d = base::HostToNet64(d);
- memcpy(p, &big_d, sizeof(big_d));
-}
-
-// Store 2 host-endian bytes as little-endian at |p|.
-inline void store_uint16_little_endian(uint16_t d, uint8_t* p) {
- uint16_t little_d = base::ByteSwapToLE16(d);
- memcpy(p, &little_d, sizeof(little_d));
-}
-
-// Store 4 host-endian bytes as little-endian at |p|.
-inline void store_uint32_little_endian(uint32_t d, uint8_t* p) {
- uint32_t little_d = base::ByteSwapToLE32(d);
- memcpy(p, &little_d, sizeof(little_d));
-}
-
-// Store 8 host-endian bytes as little-endian at |p|.
-inline void store_uint64_little_endian(uint64_t d, uint8_t* p) {
- uint64_t little_d = base::ByteSwapToLE64(d);
- memcpy(p, &little_d, sizeof(little_d));
-}
-
-} // namespace endian_util
-} // namespace media
-
-#endif // MEDIA_BASE_ENDIAN_UTIL_H_
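A small round-trip sketch for the helpers above; because every load and store goes through memcpy, |buf| needs no particular alignment.

#include <stdint.h>

#include "media/base/endian_util.h"

static uint32_t EndianRoundTrip() {
  uint8_t buf[4];
  // Stores {0x11, 0x22, 0x33, 0x44} into |buf| regardless of host endianness.
  media::endian_util::store_uint32_big_endian(0x11223344u, buf);
  // Reads the value back; returns 0x11223344 on any host.
  return media::endian_util::load_uint32_big_endian(buf);
}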
diff --git a/src/media/base/fake_audio_render_callback.cc b/src/media/base/fake_audio_render_callback.cc
deleted file mode 100644
index af55910..0000000
--- a/src/media/base/fake_audio_render_callback.cc
+++ /dev/null
@@ -1,50 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// MSVC++ requires this to be set before any other includes to get M_PI.
-#define _USE_MATH_DEFINES
-
-#include <cmath>
-
-#include "media/base/fake_audio_render_callback.h"
-
-namespace media {
-
-FakeAudioRenderCallback::FakeAudioRenderCallback(double step)
- : half_fill_(false),
- step_(step),
- last_audio_delay_milliseconds_(-1),
- volume_(1) {
- reset();
-}
-
-FakeAudioRenderCallback::~FakeAudioRenderCallback() {}
-
-int FakeAudioRenderCallback::Render(AudioBus* audio_bus,
- int audio_delay_milliseconds) {
- last_audio_delay_milliseconds_ = audio_delay_milliseconds;
- int number_of_frames = audio_bus->frames();
- if (half_fill_)
- number_of_frames /= 2;
-
- // Fill first channel with a sine wave.
- for (int i = 0; i < number_of_frames; ++i)
- audio_bus->channel(0)[i] = sin(2 * M_PI * (x_ + step_ * i));
- x_ += number_of_frames * step_;
-
- // Copy first channel into the rest of the channels.
- for (int i = 1; i < audio_bus->channels(); ++i)
- memcpy(audio_bus->channel(i), audio_bus->channel(0),
- number_of_frames * sizeof(*audio_bus->channel(i)));
-
- return number_of_frames;
-}
-
-double FakeAudioRenderCallback::ProvideInput(AudioBus* audio_bus,
- base::TimeDelta buffer_delay) {
- Render(audio_bus, buffer_delay.InMilliseconds());
- return volume_;
-}
-
-} // namespace media
diff --git a/src/media/base/fake_audio_render_callback.h b/src/media/base/fake_audio_render_callback.h
deleted file mode 100644
index 5318c99..0000000
--- a/src/media/base/fake_audio_render_callback.h
+++ /dev/null
@@ -1,62 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef MEDIA_BASE_FAKE_AUDIO_RENDER_CALLBACK_H_
-#define MEDIA_BASE_FAKE_AUDIO_RENDER_CALLBACK_H_
-
-#include "media/base/audio_converter.h"
-#include "media/base/audio_renderer_sink.h"
-#include "testing/gmock/include/gmock/gmock.h"
-
-namespace media {
-
-// Fake RenderCallback which will fill each request with a sine wave. Sine
-// state is kept across callbacks. State can be reset to default via reset().
-// Also provide an interface to AudioTransformInput.
-class FakeAudioRenderCallback
- : public AudioRendererSink::RenderCallback,
- public AudioConverter::InputCallback {
- public:
- // The function used to fulfill Render() is f(x) = sin(2 * PI * x * |step|),
- // where x = [|number_of_frames| * m, |number_of_frames| * (m + 1)] and m =
- // the number of Render() calls fulfilled thus far.
- explicit FakeAudioRenderCallback(double step);
- virtual ~FakeAudioRenderCallback();
-
- // Renders a sine wave into the provided audio data buffer. If |half_fill_|
- // is set, will only fill half the buffer.
- virtual int Render(AudioBus* audio_bus,
- int audio_delay_milliseconds) OVERRIDE;
- MOCK_METHOD0(OnRenderError, void());
-
-  // AudioConverter::InputCallback implementation.
- virtual double ProvideInput(AudioBus* audio_bus,
- base::TimeDelta buffer_delay) OVERRIDE;
-
- // Toggles only filling half the requested amount during Render().
- void set_half_fill(bool half_fill) { half_fill_ = half_fill; }
-
- // Reset the sine state to initial value.
- void reset() { x_ = 0; }
-
- // Returns the last |audio_delay_milliseconds| provided to Render() or -1 if
- // no Render() call occurred.
- int last_audio_delay_milliseconds() { return last_audio_delay_milliseconds_; }
-
-  // Set volume information used by ProvideInput().
- void set_volume(double volume) { volume_ = volume; }
-
- private:
- bool half_fill_;
- double x_;
- double step_;
- int last_audio_delay_milliseconds_;
- double volume_;
-
- DISALLOW_COPY_AND_ASSIGN(FakeAudioRenderCallback);
-};
-
-} // namespace media
-
-#endif // MEDIA_BASE_FAKE_AUDIO_RENDER_CALLBACK_H_
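An illustrative test-side usage of the fake callback (hypothetical, not from the imported sources; AudioBus::Create() is assumed to be the factory declared in media/base/audio_bus.h). With step = 0.005, each 200-frame buffer holds exactly one sine period.

#include "base/memory/scoped_ptr.h"
#include "media/base/audio_bus.h"
#include "media/base/fake_audio_render_callback.h"

namespace media {

static void RenderOneSineBuffer() {
  FakeAudioRenderCallback callback(0.005);  // Fills with sin(2 * PI * 0.005 * i).
  scoped_ptr<AudioBus> bus = AudioBus::Create(2, 200);
  int frames = callback.Render(bus.get(), 20 /* audio_delay_milliseconds */);
  // |frames| == 200 unless set_half_fill(true) was called beforehand, and
  // callback.last_audio_delay_milliseconds() now returns 20.
  (void)frames;
}

}  // namespace media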
diff --git a/src/media/base/filter_collection.cc b/src/media/base/filter_collection.cc
deleted file mode 100644
index f82a61b..0000000
--- a/src/media/base/filter_collection.cc
+++ /dev/null
@@ -1,69 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "media/base/filter_collection.h"
-
-#include "base/logging.h"
-#include "media/base/audio_decoder.h"
-#include "media/base/audio_renderer.h"
-#include "media/base/demuxer.h"
-#include "media/base/video_decoder.h"
-#include "media/base/video_renderer.h"
-
-namespace media {
-
-FilterCollection::FilterCollection() {}
-
-FilterCollection::~FilterCollection() {}
-
-void FilterCollection::SetDemuxer(const scoped_refptr<Demuxer>& demuxer) {
- demuxer_ = demuxer;
-}
-
-const scoped_refptr<Demuxer>& FilterCollection::GetDemuxer() {
- return demuxer_;
-}
-
-void FilterCollection::AddAudioRenderer(AudioRenderer* audio_renderer) {
- audio_renderers_.push_back(audio_renderer);
-}
-
-void FilterCollection::AddVideoRenderer(VideoRenderer* video_renderer) {
- video_renderers_.push_back(video_renderer);
-}
-
-void FilterCollection::Clear() {
- audio_decoders_.clear();
- video_decoders_.clear();
- audio_renderers_.clear();
- video_renderers_.clear();
-}
-
-void FilterCollection::SelectAudioRenderer(scoped_refptr<AudioRenderer>* out) {
- if (audio_renderers_.empty()) {
- *out = NULL;
- return;
- }
- *out = audio_renderers_.front();
- audio_renderers_.pop_front();
-}
-
-void FilterCollection::SelectVideoRenderer(scoped_refptr<VideoRenderer>* out) {
- if (video_renderers_.empty()) {
- *out = NULL;
- return;
- }
- *out = video_renderers_.front();
- video_renderers_.pop_front();
-}
-
-FilterCollection::AudioDecoderList* FilterCollection::GetAudioDecoders() {
- return &audio_decoders_;
-}
-
-FilterCollection::VideoDecoderList* FilterCollection::GetVideoDecoders() {
- return &video_decoders_;
-}
-
-} // namespace media
diff --git a/src/media/base/filter_collection.h b/src/media/base/filter_collection.h
deleted file mode 100644
index e9f2be5..0000000
--- a/src/media/base/filter_collection.h
+++ /dev/null
@@ -1,68 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef MEDIA_BASE_FILTER_COLLECTION_H_
-#define MEDIA_BASE_FILTER_COLLECTION_H_
-
-#include <list>
-
-#include "base/memory/ref_counted.h"
-#include "media/base/media_export.h"
-
-namespace media {
-
-class AudioDecoder;
-class AudioRenderer;
-class Demuxer;
-class VideoDecoder;
-class VideoRenderer;
-
-// Represents a set of uninitialized demuxer and audio/video decoders and
-// renderers. Used to start a Pipeline object for media playback.
-//
-// TODO(scherkus): Replace FilterCollection with something sensible, see
-// http://crbug.com/110800
-class MEDIA_EXPORT FilterCollection {
- public:
- typedef std::list<scoped_refptr<AudioDecoder> > AudioDecoderList;
- typedef std::list<scoped_refptr<VideoDecoder> > VideoDecoderList;
-
- FilterCollection();
- ~FilterCollection();
-
- // Demuxer accessor methods.
- void SetDemuxer(const scoped_refptr<Demuxer>& demuxer);
- const scoped_refptr<Demuxer>& GetDemuxer();
-
- // Adds a filter to the collection.
- void AddAudioDecoder(AudioDecoder* audio_decoder);
- void AddAudioRenderer(AudioRenderer* audio_renderer);
- void AddVideoRenderer(VideoRenderer* video_renderer);
-
- // Remove remaining filters.
- void Clear();
-
- // Selects a filter of the specified type from the collection.
- // If the required filter cannot be found, NULL is returned.
- // If a filter is returned it is removed from the collection.
- // Filters are selected in FIFO order.
- void SelectAudioRenderer(scoped_refptr<AudioRenderer>* out);
- void SelectVideoRenderer(scoped_refptr<VideoRenderer>* out);
-
- AudioDecoderList* GetAudioDecoders();
- VideoDecoderList* GetVideoDecoders();
-
- private:
- scoped_refptr<Demuxer> demuxer_;
- AudioDecoderList audio_decoders_;
- VideoDecoderList video_decoders_;
- std::list<scoped_refptr<AudioRenderer> > audio_renderers_;
- std::list<scoped_refptr<VideoRenderer> > video_renderers_;
-
- DISALLOW_COPY_AND_ASSIGN(FilterCollection);
-};
-
-} // namespace media
-
-#endif // MEDIA_BASE_FILTER_COLLECTION_H_
diff --git a/src/media/base/filter_collection_unittest.cc b/src/media/base/filter_collection_unittest.cc
deleted file mode 100644
index a8fdd70..0000000
--- a/src/media/base/filter_collection_unittest.cc
+++ /dev/null
@@ -1,65 +0,0 @@
-// Copyright (c) 2011 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "media/base/filter_collection.h"
-#include "media/base/mock_filters.h"
-#include "testing/gtest/include/gtest/gtest.h"
-
-namespace media {
-
-class FilterCollectionTest : public ::testing::Test {
- public:
- FilterCollectionTest() {}
- virtual ~FilterCollectionTest() {}
-
- protected:
- FilterCollection collection_;
- MockFilterCollection mock_filters_;
-
- DISALLOW_COPY_AND_ASSIGN(FilterCollectionTest);
-};
-
-TEST_F(FilterCollectionTest, SelectXXXMethods) {
- scoped_refptr<AudioRenderer> audio_renderer;
-
- collection_.SelectAudioRenderer(&audio_renderer);
- EXPECT_FALSE(audio_renderer);
-
-  // Add an audio renderer.
- collection_.AddAudioRenderer(mock_filters_.audio_renderer());
-
-  // Verify that we can select the audio renderer.
- collection_.SelectAudioRenderer(&audio_renderer);
- EXPECT_TRUE(audio_renderer);
-
- // Verify that we can't select it again since only one has been added.
- collection_.SelectAudioRenderer(&audio_renderer);
- EXPECT_FALSE(audio_renderer);
-}
-
-TEST_F(FilterCollectionTest, MultipleFiltersOfSameType) {
- scoped_refptr<AudioRenderer> audio_renderer_a(new MockAudioRenderer());
- scoped_refptr<AudioRenderer> audio_renderer_b(new MockAudioRenderer());
-
- scoped_refptr<AudioRenderer> audio_renderer;
-
- collection_.AddAudioRenderer(audio_renderer_a.get());
- collection_.AddAudioRenderer(audio_renderer_b.get());
-
- // Verify that first SelectAudioRenderer() returns audio_renderer_a.
- collection_.SelectAudioRenderer(&audio_renderer);
- EXPECT_TRUE(audio_renderer);
- EXPECT_EQ(audio_renderer, audio_renderer_a);
-
- // Verify that second SelectAudioRenderer() returns audio_renderer_b.
- collection_.SelectAudioRenderer(&audio_renderer);
- EXPECT_TRUE(audio_renderer);
- EXPECT_EQ(audio_renderer, audio_renderer_b);
-
- // Verify that third SelectAudioRenderer() returns nothing.
- collection_.SelectAudioRenderer(&audio_renderer);
- EXPECT_FALSE(audio_renderer);
-}
-
-} // namespace media
diff --git a/src/media/base/gfx_export.h b/src/media/base/gfx_export.h
deleted file mode 100644
index 3106747..0000000
--- a/src/media/base/gfx_export.h
+++ /dev/null
@@ -1,29 +0,0 @@
-// Copyright (c) 2013 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef COBALT_MEDIA_BASE_GFX_EXPORT_H_
-#define COBALT_MEDIA_BASE_GFX_EXPORT_H_
-
-#if defined(COMPONENT_BUILD)
-#if defined(WIN32)
-
-#if defined(GFX_IMPLEMENTATION)
-#define GFX_EXPORT __declspec(dllexport)
-#else
-#define GFX_EXPORT __declspec(dllimport)
-#endif // defined(GFX_IMPLEMENTATION)
-
-#else // defined(WIN32)
-#if defined(GFX_IMPLEMENTATION)
-#define GFX_EXPORT __attribute__((visibility("default")))
-#else
-#define GFX_EXPORT
-#endif
-#endif
-
-#else // defined(COMPONENT_BUILD)
-#define GFX_EXPORT
-#endif
-
-#endif // COBALT_MEDIA_BASE_GFX_EXPORT_H_
diff --git a/src/media/base/gmock_callback_support.h b/src/media/base/gmock_callback_support.h
deleted file mode 100644
index 22f4c10..0000000
--- a/src/media/base/gmock_callback_support.h
+++ /dev/null
@@ -1,107 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef MEDIA_BASE_GMOCK_CALLBACK_SUPPORT_H_
-#define MEDIA_BASE_GMOCK_CALLBACK_SUPPORT_H_
-
-#include "testing/gmock/include/gmock/gmock.h"
-
-namespace media {
-
-// Matchers for base::Callback and base::Closure.
-
-MATCHER(IsNullCallback, "a null callback") {
- return (arg.is_null());
-}
-
-MATCHER(IsNotNullCallback, "a non-null callback") {
- return (!arg.is_null());
-}
-
-// The RunClosure<N>() action invokes Run() method on the N-th (0-based)
-// argument of the mock function.
-
-ACTION_TEMPLATE(RunClosure,
- HAS_1_TEMPLATE_PARAMS(int, k),
- AND_0_VALUE_PARAMS()) {
- ::std::tr1::get<k>(args).Run();
-}
-
-// Various overloads for RunCallback<N>().
-//
-// The RunCallback<N>(p1, p2, ..., p_k) action invokes Run() method on the N-th
-// (0-based) argument of the mock function, with arguments p1, p2, ..., p_k.
-//
-// Notes:
-//
-// 1. The arguments are passed by value by default. If you need to
-// pass an argument by reference, wrap it inside ByRef(). For example,
-//
-// RunCallback<1>(5, string("Hello"), ByRef(foo))
-//
-// passes 5 and string("Hello") by value, and passes foo by reference.
-//
-// 2. If the callback takes an argument by reference but ByRef() is
-// not used, it will receive the reference to a copy of the value,
-// instead of the original value. For example, when the 0-th
-// argument of the callback takes a const string&, the action
-//
-// RunCallback<0>(string("Hello"))
-//
-// makes a copy of the temporary string("Hello") object and passes a
-// reference of the copy, instead of the original temporary object,
-// to the callback. This makes it easy for a user to define an
-// RunCallback action from temporary values and have it performed later.
-
-ACTION_TEMPLATE(RunCallback,
- HAS_1_TEMPLATE_PARAMS(int, k),
- AND_0_VALUE_PARAMS()) {
- return ::std::tr1::get<k>(args).Run();
-}
-
-ACTION_TEMPLATE(RunCallback,
- HAS_1_TEMPLATE_PARAMS(int, k),
- AND_1_VALUE_PARAMS(p0)) {
- return ::std::tr1::get<k>(args).Run(p0);
-}
-
-ACTION_TEMPLATE(RunCallback,
- HAS_1_TEMPLATE_PARAMS(int, k),
- AND_2_VALUE_PARAMS(p0, p1)) {
- return ::std::tr1::get<k>(args).Run(p0, p1);
-}
-
-ACTION_TEMPLATE(RunCallback,
- HAS_1_TEMPLATE_PARAMS(int, k),
- AND_3_VALUE_PARAMS(p0, p1, p2)) {
- return ::std::tr1::get<k>(args).Run(p0, p1, p2);
-}
-
-ACTION_TEMPLATE(RunCallback,
- HAS_1_TEMPLATE_PARAMS(int, k),
- AND_4_VALUE_PARAMS(p0, p1, p2, p3)) {
- return ::std::tr1::get<k>(args).Run(p0, p1, p2, p3);
-}
-
-ACTION_TEMPLATE(RunCallback,
- HAS_1_TEMPLATE_PARAMS(int, k),
- AND_5_VALUE_PARAMS(p0, p1, p2, p3, p4)) {
- return ::std::tr1::get<k>(args).Run(p0, p1, p2, p3, p4);
-}
-
-ACTION_TEMPLATE(RunCallback,
- HAS_1_TEMPLATE_PARAMS(int, k),
- AND_6_VALUE_PARAMS(p0, p1, p2, p3, p4, p5)) {
- return ::std::tr1::get<k>(args).Run(p0, p1, p2, p3, p4, p5);
-}
-
-ACTION_TEMPLATE(RunCallback,
- HAS_1_TEMPLATE_PARAMS(int, k),
- AND_7_VALUE_PARAMS(p0, p1, p2, p3, p4, p5, p6)) {
- return ::std::tr1::get<k>(args).Run(p0, p1, p2, p3, p4, p5, p6);
-}
-
-} // namespace media
-
-#endif // MEDIA_BASE_GMOCK_CALLBACK_SUPPORT_H_
diff --git a/src/media/base/gmock_callback_support_unittest.cc b/src/media/base/gmock_callback_support_unittest.cc
deleted file mode 100644
index fb1beb9..0000000
--- a/src/media/base/gmock_callback_support_unittest.cc
+++ /dev/null
@@ -1,84 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "media/base/gmock_callback_support.h"
-
-#include "base/bind.h"
-#include "base/callback.h"
-#include "testing/gmock/include/gmock/gmock.h"
-#include "testing/gtest/include/gtest/gtest.h"
-
-using testing::ByRef;
-using testing::MockFunction;
-
-namespace media {
-
-typedef base::Callback<void(const bool& src, bool* dst)> TestCallback;
-
-void SetBool(const bool& src, bool* dst) {
- *dst = src;
-}
-
-TEST(GmockCallbackSupportTest, IsNullCallback) {
- MockFunction<void(const TestCallback&)> check;
- EXPECT_CALL(check, Call(IsNullCallback()));
- check.Call(TestCallback());
-}
-
-TEST(GmockCallbackSupportTest, IsNotNullCallback) {
- MockFunction<void(const TestCallback&)> check;
- EXPECT_CALL(check, Call(IsNotNullCallback()));
- check.Call(base::Bind(&SetBool));
-}
-
-TEST(GmockCallbackSupportTest, RunClosure) {
- MockFunction<void(const base::Closure&)> check;
- bool dst = false;
- EXPECT_CALL(check, Call(IsNotNullCallback()))
- .WillOnce(RunClosure<0>());
- check.Call(base::Bind(&SetBool, true, &dst));
- EXPECT_TRUE(dst);
-}
-
-TEST(GmockCallbackSupportTest, RunCallback0) {
- MockFunction<void(const TestCallback&)> check;
- bool dst = false;
- EXPECT_CALL(check, Call(IsNotNullCallback()))
- .WillOnce(RunCallback<0>(true, &dst));
- check.Call(base::Bind(&SetBool));
- EXPECT_TRUE(dst);
-}
-
-TEST(GmockCallbackSupportTest, RunCallback1) {
- MockFunction<void(int, const TestCallback&)> check;
- bool dst = false;
- EXPECT_CALL(check, Call(0, IsNotNullCallback()))
- .WillOnce(RunCallback<1>(true, &dst));
- check.Call(0, base::Bind(&SetBool));
- EXPECT_TRUE(dst);
-}
-
-TEST(GmockCallbackSupportTest, RunCallbackPassByRef) {
- MockFunction<void(const TestCallback&)> check;
- bool dst = false;
- bool src = false;
- EXPECT_CALL(check, Call(IsNotNullCallback()))
- .WillOnce(RunCallback<0>(ByRef(src), &dst));
- src = true;
- check.Call(base::Bind(&SetBool));
- EXPECT_TRUE(dst);
-}
-
-TEST(GmockCallbackSupportTest, RunCallbackPassByValue) {
- MockFunction<void(const TestCallback&)> check;
- bool dst = false;
- bool src = true;
- EXPECT_CALL(check, Call(IsNotNullCallback()))
- .WillOnce(RunCallback<0>(src, &dst));
- src = false;
- check.Call(base::Bind(&SetBool));
- EXPECT_TRUE(dst);
-}
-
-} // namespace media
diff --git a/src/media/base/hdr_metadata.cc b/src/media/base/hdr_metadata.cc
deleted file mode 100644
index 98dcd57..0000000
--- a/src/media/base/hdr_metadata.cc
+++ /dev/null
@@ -1,27 +0,0 @@
-// Copyright 2016 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "media/base/hdr_metadata.h"
-
-namespace media {
-
-MasteringMetadata::MasteringMetadata() {
- primary_r_chromaticity_x = 0;
- primary_r_chromaticity_y = 0;
- primary_g_chromaticity_x = 0;
- primary_g_chromaticity_y = 0;
- primary_b_chromaticity_x = 0;
- primary_b_chromaticity_y = 0;
- white_point_chromaticity_x = 0;
- white_point_chromaticity_y = 0;
- luminance_max = 0;
- luminance_min = 0;
-}
-
-HDRMetadata::HDRMetadata() {
- max_cll = 0;
- max_fall = 0;
-}
-
-} // namespace media
diff --git a/src/media/base/hdr_metadata.h b/src/media/base/hdr_metadata.h
deleted file mode 100644
index cc21972..0000000
--- a/src/media/base/hdr_metadata.h
+++ /dev/null
@@ -1,58 +0,0 @@
-// Copyright 2016 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef MEDIA_BASE_HDR_METADATA_H_
-#define MEDIA_BASE_HDR_METADATA_H_
-
-#include "media/base/color_space.h"
-#include "media/base/media_export.h"
-
-namespace media {
-
-// SMPTE ST 2086 mastering metadata.
-struct MEDIA_EXPORT MasteringMetadata {
- float primary_r_chromaticity_x;
- float primary_r_chromaticity_y;
- float primary_g_chromaticity_x;
- float primary_g_chromaticity_y;
- float primary_b_chromaticity_x;
- float primary_b_chromaticity_y;
- float white_point_chromaticity_x;
- float white_point_chromaticity_y;
- float luminance_max;
- float luminance_min;
-
- MasteringMetadata();
-
- bool operator==(const MasteringMetadata& rhs) const {
- return ((primary_r_chromaticity_x == rhs.primary_r_chromaticity_x) &&
- (primary_r_chromaticity_y == rhs.primary_r_chromaticity_y) &&
- (primary_g_chromaticity_x == rhs.primary_g_chromaticity_x) &&
- (primary_g_chromaticity_y == rhs.primary_g_chromaticity_y) &&
- (primary_b_chromaticity_x == rhs.primary_b_chromaticity_x) &&
- (primary_b_chromaticity_y == rhs.primary_b_chromaticity_y) &&
- (white_point_chromaticity_x == rhs.white_point_chromaticity_x) &&
- (white_point_chromaticity_y == rhs.white_point_chromaticity_y) &&
- (luminance_max == rhs.luminance_max) &&
- (luminance_min == rhs.luminance_min));
- }
-};
-
-// HDR metadata common for HDR10 and WebM/VP9-based HDR formats.
-struct MEDIA_EXPORT HDRMetadata {
- MasteringMetadata mastering_metadata;
- unsigned int max_cll;
- unsigned int max_fall;
-
- HDRMetadata();
-
- bool operator==(const HDRMetadata& rhs) const {
- return ((max_cll == rhs.max_cll) && (max_fall == rhs.max_fall) &&
- (mastering_metadata == rhs.mastering_metadata));
- }
-};
-
-} // namespace media
-
-#endif // MEDIA_BASE_HDR_METADATA_H_
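For orientation, here is a sketch of how a caller might populate the structures above for typical HDR10 content. The chromaticity and luminance figures are common illustrative values (BT.2020 primaries, D65 white point, 1000-nit mastering display), not anything mandated by the header, and the function name is hypothetical.

// Illustrative only: plausible HDR10 metadata for a BT.2020 mastering display.
#include "media/base/hdr_metadata.h"

media::HDRMetadata MakeExampleHdr10Metadata() {
  media::HDRMetadata hdr;
  media::MasteringMetadata& m = hdr.mastering_metadata;
  m.primary_r_chromaticity_x = 0.708f;     // BT.2020 red primary (x, y).
  m.primary_r_chromaticity_y = 0.292f;
  m.primary_g_chromaticity_x = 0.170f;     // BT.2020 green primary.
  m.primary_g_chromaticity_y = 0.797f;
  m.primary_b_chromaticity_x = 0.131f;     // BT.2020 blue primary.
  m.primary_b_chromaticity_y = 0.046f;
  m.white_point_chromaticity_x = 0.3127f;  // D65 white point.
  m.white_point_chromaticity_y = 0.3290f;
  m.luminance_max = 1000.0f;   // Peak mastering luminance, in nits.
  m.luminance_min = 0.0001f;   // Minimum mastering luminance, in nits.
  hdr.max_cll = 1000;   // Maximum content light level.
  hdr.max_fall = 400;   // Maximum frame-average light level.
  return hdr;
}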
diff --git a/src/media/base/icc_profile.cc b/src/media/base/icc_profile.cc
deleted file mode 100644
index 1e7b95d..0000000
--- a/src/media/base/icc_profile.cc
+++ /dev/null
@@ -1,198 +0,0 @@
-// Copyright 2016 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "ui/gfx/icc_profile.h"
-
-#include <list>
-
-#include "base/containers/mru_cache.h"
-#include "base/lazy_instance.h"
-#include "base/synchronization/lock.h"
-#include "ui/gfx/color_transform.h"
-
-namespace gfx {
-
-namespace {
-const size_t kMinProfileLength = 128;
-const size_t kMaxProfileLength = 4 * 1024 * 1024;
-
-// Allow keeping around a maximum of 8 cached ICC profiles. Beware that
-// we will do a linear search through currently-cached ICC profiles
-// when creating a new ICC profile.
-const size_t kMaxCachedICCProfiles = 8;
-
-struct Cache {
- Cache() : id_to_icc_profile_mru(kMaxCachedICCProfiles) {}
- ~Cache() {}
-
- // Start from-ICC-data IDs at the end of the hard-coded list.
- uint64_t next_unused_id = 5;
- base::MRUCache<uint64_t, ICCProfile> id_to_icc_profile_mru;
- base::Lock lock;
-};
-static base::LazyInstance<Cache> g_cache;
-
-} // namespace
-
-ICCProfile::ICCProfile() = default;
-ICCProfile::ICCProfile(ICCProfile&& other) = default;
-ICCProfile::ICCProfile(const ICCProfile& other) = default;
-ICCProfile& ICCProfile::operator=(ICCProfile&& other) = default;
-ICCProfile& ICCProfile::operator=(const ICCProfile& other) = default;
-ICCProfile::~ICCProfile() = default;
-
-bool ICCProfile::operator==(const ICCProfile& other) const {
- if (type_ != other.type_)
- return false;
- switch (type_) {
- case Type::INVALID:
- return true;
- case Type::FROM_COLOR_SPACE:
- return color_space_ == other.color_space_;
- case Type::FROM_DATA:
- return data_ == other.data_;
- }
- return false;
-}
-
-// static
-ICCProfile ICCProfile::FromData(const char* data, size_t size) {
- ICCProfile icc_profile;
- if (IsValidProfileLength(size)) {
- icc_profile.type_ = Type::FROM_DATA;
- icc_profile.data_.insert(icc_profile.data_.begin(), data, data + size);
- } else {
- return ICCProfile();
- }
-
- Cache& cache = g_cache.Get();
- base::AutoLock lock(cache.lock);
-
- // Linearly search the cached ICC profiles to find one with the same data.
- // If it exists, re-use its id and touch it in the cache.
- for (auto iter = cache.id_to_icc_profile_mru.begin();
- iter != cache.id_to_icc_profile_mru.end(); ++iter) {
- if (icc_profile.data_ == iter->second.data_) {
- icc_profile = iter->second;
- cache.id_to_icc_profile_mru.Get(icc_profile.id_);
- return icc_profile;
- }
- }
-
- // Create a new cached id and add it to the cache.
- icc_profile.id_ = cache.next_unused_id++;
- icc_profile.color_space_ =
- ColorSpace(ColorSpace::PrimaryID::CUSTOM, ColorSpace::TransferID::CUSTOM,
- ColorSpace::MatrixID::RGB, ColorSpace::RangeID::FULL);
- icc_profile.color_space_.icc_profile_id_ = icc_profile.id_;
- cache.id_to_icc_profile_mru.Put(icc_profile.id_, icc_profile);
- return icc_profile;
-}
-
-#if !defined(OS_WIN) && !defined(OS_MACOSX) && !defined(USE_X11)
-// static
-ICCProfile ICCProfile::FromBestMonitor() {
- return ICCProfile();
-}
-#endif
-
-// static
-ICCProfile ICCProfile::FromColorSpace(const gfx::ColorSpace& color_space) {
- if (color_space == gfx::ColorSpace())
- return ICCProfile();
-
- // If |color_space| was created from an ICC profile, retrieve that exact
- // profile.
- if (color_space.icc_profile_id_) {
- Cache& cache = g_cache.Get();
- base::AutoLock lock(cache.lock);
-
- auto found = cache.id_to_icc_profile_mru.Get(color_space.icc_profile_id_);
- if (found != cache.id_to_icc_profile_mru.end()) {
- return found->second;
- }
- }
-
- // TODO(ccameron): Support constructing ICC profiles from arbitrary ColorSpace
- // objects.
- ICCProfile icc_profile;
- icc_profile.type_ = gfx::ICCProfile::Type::FROM_COLOR_SPACE;
- icc_profile.color_space_ = color_space;
- return icc_profile;
-}
-
-const std::vector<char>& ICCProfile::GetData() const {
- return data_;
-}
-
-ColorSpace ICCProfile::GetColorSpace() const {
- if (type_ == Type::INVALID)
- return gfx::ColorSpace();
- if (type_ == Type::FROM_COLOR_SPACE)
- return color_space_;
-
- ColorSpace color_space = color_space_;
-
- // Move this ICC profile to the most recently used end of the cache.
- {
- Cache& cache = g_cache.Get();
- base::AutoLock lock(cache.lock);
-
- auto found = cache.id_to_icc_profile_mru.Get(id_);
- if (found == cache.id_to_icc_profile_mru.end())
- cache.id_to_icc_profile_mru.Put(id_, *this);
- }
-
- ColorSpace unity_colorspace(
- ColorSpace::PrimaryID::CUSTOM, ColorSpace::TransferID::LINEAR,
- ColorSpace::MatrixID::RGB, ColorSpace::RangeID::FULL);
- unity_colorspace.custom_primary_matrix_[0] = 1.0f;
- unity_colorspace.custom_primary_matrix_[1] = 0.0f;
- unity_colorspace.custom_primary_matrix_[2] = 0.0f;
- unity_colorspace.custom_primary_matrix_[3] = 0.0f;
-
- unity_colorspace.custom_primary_matrix_[4] = 0.0f;
- unity_colorspace.custom_primary_matrix_[5] = 1.0f;
- unity_colorspace.custom_primary_matrix_[6] = 0.0f;
- unity_colorspace.custom_primary_matrix_[7] = 0.0f;
-
- unity_colorspace.custom_primary_matrix_[8] = 0.0f;
- unity_colorspace.custom_primary_matrix_[9] = 0.0f;
- unity_colorspace.custom_primary_matrix_[10] = 1.0f;
- unity_colorspace.custom_primary_matrix_[11] = 0.0f;
-
- // This will look up and use the ICC profile.
- std::unique_ptr<ColorTransform> transform(ColorTransform::NewColorTransform(
- color_space, unity_colorspace, ColorTransform::Intent::INTENT_ABSOLUTE));
-
- ColorTransform::TriStim tmp[4];
- tmp[0].set_x(1.0f);
- tmp[1].set_y(1.0f);
- tmp[2].set_z(1.0f);
- transform->transform(tmp, arraysize(tmp));
-
- color_space.custom_primary_matrix_[0] = tmp[0].x() - tmp[3].x();
- color_space.custom_primary_matrix_[1] = tmp[1].x() - tmp[3].x();
- color_space.custom_primary_matrix_[2] = tmp[2].x() - tmp[3].x();
- color_space.custom_primary_matrix_[3] = tmp[3].x();
-
- color_space.custom_primary_matrix_[4] = tmp[0].y() - tmp[3].y();
- color_space.custom_primary_matrix_[5] = tmp[1].y() - tmp[3].y();
- color_space.custom_primary_matrix_[6] = tmp[2].y() - tmp[3].y();
- color_space.custom_primary_matrix_[7] = tmp[3].y();
-
- color_space.custom_primary_matrix_[8] = tmp[0].z() - tmp[3].z();
- color_space.custom_primary_matrix_[9] = tmp[1].z() - tmp[3].z();
- color_space.custom_primary_matrix_[10] = tmp[2].z() - tmp[3].z();
- color_space.custom_primary_matrix_[11] = tmp[3].z();
-
- return color_space;
-}
-
-// static
-bool ICCProfile::IsValidProfileLength(size_t length) {
- return length >= kMinProfileLength && length <= kMaxProfileLength;
-}
-
-} // namespace gfx
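The caching strategy described in the comments above (an MRU cache capped at kMaxCachedICCProfiles, searched linearly so that byte-identical profile data shares a single id) can be shown with a small standalone sketch. This is a simplification built on std::list rather than base::MRUCache, with invented names; it is not the gfx implementation itself.

// Simplified illustration of the id-assignment cache described above.
#include <cstdint>
#include <list>
#include <vector>

namespace {

constexpr size_t kMaxCachedProfiles = 8;

struct CachedProfile {
  uint64_t id;
  std::vector<char> data;
};

class ProfileIdCache {
 public:
  // Returns the id for |data|, reusing an existing entry when the bytes match.
  uint64_t GetOrAssignId(const std::vector<char>& data) {
    for (auto it = entries_.begin(); it != entries_.end(); ++it) {
      if (it->data == data) {
        // Touch: move the hit to the most-recently-used end of the list.
        entries_.splice(entries_.begin(), entries_, it);
        return entries_.front().id;
      }
    }
    entries_.push_front({next_id_++, data});
    if (entries_.size() > kMaxCachedProfiles)
      entries_.pop_back();  // Evict the least-recently-used entry.
    return entries_.front().id;
  }

 private:
  uint64_t next_id_ = 5;  // Mirrors "start after the hard-coded ids" above.
  std::list<CachedProfile> entries_;
};

}  // namespace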
diff --git a/src/media/base/icc_profile.h b/src/media/base/icc_profile.h
deleted file mode 100644
index 25cf632..0000000
--- a/src/media/base/icc_profile.h
+++ /dev/null
@@ -1,108 +0,0 @@
-// Copyright 2016 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef UI_GFX_ICC_PROFILE_H_
-#define UI_GFX_ICC_PROFILE_H_
-
-#include <stdint.h>
-#include <vector>
-
-#include "base/gtest_prod_util.h"
-#include "media/base/color_space.h"
-
-#if defined(OS_MACOSX)
-#include <CoreGraphics/CGColorSpace.h>
-#endif
-
-extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size);
-
-namespace mojo {
-template <typename, typename>
-struct StructTraits;
-}
-
-namespace gfx {
-
-namespace mojom {
-class ICCProfileDataView;
-}
-
-// Used to represent a full ICC profile, usually retrieved from a monitor. It
-// can be lossily compressed into a ColorSpace object. This structure should
-// only be sent from higher-privilege processes to lower-privilege processes,
-// as parsing this structure is not secure.
-class GFX_EXPORT ICCProfile {
- public:
- ICCProfile();
- ICCProfile(ICCProfile&& other);
- ICCProfile(const ICCProfile& other);
- ICCProfile& operator=(ICCProfile&& other);
- ICCProfile& operator=(const ICCProfile& other);
- ~ICCProfile();
- bool operator==(const ICCProfile& other) const;
-
- // Returns the color profile of the monitor that can best represent color.
- // This profile should be used for creating content that does not know on
- // which monitor it will be displayed.
- static ICCProfile FromBestMonitor();
-#if defined(OS_MACOSX)
- static ICCProfile FromCGColorSpace(CGColorSpaceRef cg_color_space);
-#endif
-
-  // This will recover an ICCProfile from a compact ColorSpace representation.
- // Internally, this will make an effort to create an identical ICCProfile
- // to the one that created |color_space|, but this is not guaranteed.
- static ICCProfile FromColorSpace(const gfx::ColorSpace& color_space);
-
- // Create directly from profile data.
- static ICCProfile FromData(const char* icc_profile, size_t size);
-
- // This will perform a potentially-lossy conversion to a more compact color
- // space representation.
- ColorSpace GetColorSpace() const;
-
- const std::vector<char>& GetData() const;
-
-#if defined(OS_WIN)
- // This will read monitor ICC profiles from disk and cache the results for the
- // other functions to read. This should not be called on the UI or IO thread.
- static void UpdateCachedProfilesOnBackgroundThread();
- static bool CachedProfilesNeedUpdate();
-#endif
-
- enum class Type {
- // This is not a valid profile.
- INVALID,
- // This is from a gfx::ColorSpace. This ensures that GetColorSpace returns
- // the exact same object as was used to create this.
- FROM_COLOR_SPACE,
- // This was created from ICC profile data.
- FROM_DATA,
- LAST = FROM_DATA
- };
-
- private:
- static bool IsValidProfileLength(size_t length);
-
- Type type_ = Type::INVALID;
- gfx::ColorSpace color_space_;
- std::vector<char> data_;
-
- // This globally identifies this ICC profile. It is used to look up this ICC
- // profile from a ColorSpace object created from it.
- uint64_t id_ = 0;
-
- FRIEND_TEST_ALL_PREFIXES(SimpleColorSpace, BT709toSRGBICC);
- FRIEND_TEST_ALL_PREFIXES(SimpleColorSpace, GetColorSpace);
- friend int ::LLVMFuzzerTestOneInput(const uint8_t*, size_t);
- friend class ColorSpace;
- friend struct IPC::ParamTraits<gfx::ICCProfile>;
- friend struct IPC::ParamTraits<gfx::ICCProfile::Type>;
- friend struct mojo::StructTraits<gfx::mojom::ICCProfileDataView,
- gfx::ICCProfile>;
-};
-
-} // namespace gfx
-
-#endif // UI_GFX_ICC_PROFILE_H_
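A hypothetical call site for the API above, assuming the ui/gfx target is linked: wrap raw profile bytes and collapse them into the compact ColorSpace form. The helper name is invented for illustration.

#include <vector>

#include "ui/gfx/icc_profile.h"

gfx::ColorSpace ColorSpaceFromRawProfile(const std::vector<char>& bytes) {
  // FromData() rejects lengths outside the [128 bytes, 4 MB] range and yields
  // an invalid profile; GetColorSpace() then falls back to the default space.
  gfx::ICCProfile profile =
      gfx::ICCProfile::FromData(bytes.data(), bytes.size());
  return profile.GetColorSpace();
}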
diff --git a/src/media/base/interleaved_sinc_resampler.cc b/src/media/base/interleaved_sinc_resampler.cc
deleted file mode 100644
index d4df9ff..0000000
--- a/src/media/base/interleaved_sinc_resampler.cc
+++ /dev/null
@@ -1,335 +0,0 @@
-// Copyright (c) 2015 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-//
-// Input buffer layout, dividing the total buffer into regions (r0_ - r5_):
-//
-// |----------------|-----------------------------------------|----------------|
-//
-// kBlockSize + kKernelSize / 2
-// <--------------------------------------------------------->
-// r0_
-//
-// kKernelSize / 2 kKernelSize / 2 kKernelSize / 2 kKernelSize / 2
-// <---------------> <---------------> <---------------> <--------------->
-// r1_ r2_ r3_ r4_
-//
-// kBlockSize
-// <--------------------------------------->
-// r5_
-//
-// The algorithm:
-//
-// 1) Consume input frames into r0_ (r1_ is zero-initialized).
-// 2) Position kernel centered at start of r0_ (r2_) and generate output frames
-// until kernel is centered at start of r4_ or we've finished generating all
-// the output frames.
-// 3) Copy r3_ to r1_ and r4_ to r2_.
-// 4) Consume input frames into r5_ (zero-pad if we run out of input).
-// 5) Goto (2) until all of input is consumed.
-//
-// Note: we're glossing over how the sub-sample handling works with
-// |virtual_source_idx_|, etc.
-
-#include "media/base/interleaved_sinc_resampler.h"
-
-#include <algorithm>
-#include <cmath>
-
-#include "base/logging.h"
-
-namespace media {
-
-namespace {
-
-// The kernel size can be adjusted for quality (higher is better) at the
-// expense of performance. Must be a multiple of 32.
-const int kKernelSize = 32;
-
-// The number of destination frames generated per processing pass. Affects
-// how often and for how much InterleavedSincResampler calls back for input.
-// Must be greater than kKernelSize.
-const int kBlockSize = 512;
-
-// The kernel offset count is used for interpolation and is the number of
-// sub-sample kernel shifts. Can be adjusted for quality (higher is better)
-// at the expense of allocating more memory.
-const int kKernelOffsetCount = 32;
-const int kKernelStorageSize = kKernelSize * (kKernelOffsetCount + 1);
-
-// The size (in samples) of the internal buffer used by the resampler.
-const int kBufferSize = kBlockSize + kKernelSize;
-
-// The maximum number of buffers that can be queued.
-const int kMaximumPendingBuffers = 8;
-
-} // namespace
-
-InterleavedSincResampler::InterleavedSincResampler(double io_sample_rate_ratio,
- int channel_count)
- : io_sample_rate_ratio_(io_sample_rate_ratio),
- virtual_source_idx_(0),
- buffer_primed_(false),
- channel_count_(channel_count),
- frame_size_in_bytes_(sizeof(float) * channel_count_),
- // Create buffers with a 16-byte alignment for possible optimizations.
- kernel_storage_(static_cast<float*>(
- base::AlignedAlloc(sizeof(float) * kKernelStorageSize, 16))),
- input_buffer_(static_cast<float*>(
- base::AlignedAlloc(frame_size_in_bytes_ * kBufferSize, 16))),
- offset_in_frames_(0),
- frames_resampled_(0),
- frames_queued_(0),
- // Setup various region pointers in the buffer (see diagram above).
- r0_(input_buffer_.get() + kKernelSize / 2 * channel_count_),
- r1_(input_buffer_.get()),
- r2_(r0_),
- r3_(r0_ + (kBlockSize - kKernelSize / 2) * channel_count_),
- r4_(r0_ + kBlockSize * channel_count_),
- r5_(r0_ + kKernelSize / 2 * channel_count_) {
- // Ensure kKernelSize is a multiple of 32 for easy SSE optimizations; causes
- // r0_ and r5_ (used for input) to always be 16-byte aligned by virtue of
- // input_buffer_ being 16-byte aligned.
- DCHECK_EQ(kKernelSize % 32, 0) << "kKernelSize must be a multiple of 32!";
- DCHECK_GT(kBlockSize, kKernelSize)
- << "kBlockSize must be greater than kKernelSize!";
- // Basic sanity checks to ensure buffer regions are laid out correctly:
- // r0_ and r2_ should always be the same position.
- DCHECK_EQ(r0_, r2_);
- // r1_ at the beginning of the buffer.
- DCHECK_EQ(r1_, input_buffer_.get());
- // r1_ left of r2_, r2_ left of r5_ and r1_, r2_ size correct.
- DCHECK_EQ(r2_ - r1_, r5_ - r2_);
- // r3_ left of r4_, r5_ left of r0_ and r3_ size correct.
- DCHECK_EQ(r4_ - r3_, r5_ - r0_);
- // r3_, r4_ size correct and r4_ at the end of the buffer.
- DCHECK_EQ(r4_ + (r4_ - r3_), r1_ + kBufferSize * channel_count_);
- // r5_ size correct and at the end of the buffer.
- DCHECK_EQ(r5_ + kBlockSize * channel_count_,
- r1_ + kBufferSize * channel_count_);
-
- memset(kernel_storage_.get(), 0,
- sizeof(*kernel_storage_.get()) * kKernelStorageSize);
- memset(input_buffer_.get(), 0, frame_size_in_bytes_ * kBufferSize);
-
- InitializeKernel();
-}
-
-void InterleavedSincResampler::InitializeKernel() {
- // Blackman window parameters.
- static const double kAlpha = 0.16;
- static const double kA0 = 0.5 * (1.0 - kAlpha);
- static const double kA1 = 0.5;
- static const double kA2 = 0.5 * kAlpha;
-
- // |sinc_scale_factor| is basically the normalized cutoff frequency of the
- // low-pass filter.
- double sinc_scale_factor =
- io_sample_rate_ratio_ > 1.0 ? 1.0 / io_sample_rate_ratio_ : 1.0;
-
- // The sinc function is an idealized brick-wall filter, but since we're
-  // windowing it, the transition from pass to stop does not happen right away.
- // So we should adjust the low pass filter cutoff slightly downward to avoid
- // some aliasing at the very high-end.
- // TODO(crogers): this value is empirical and to be more exact should vary
- // depending on kKernelSize.
- sinc_scale_factor *= 0.9;
-
- // Generates a set of windowed sinc() kernels.
- // We generate a range of sub-sample offsets from 0.0 to 1.0.
- for (int offset_idx = 0; offset_idx <= kKernelOffsetCount; ++offset_idx) {
- double subsample_offset =
- static_cast<double>(offset_idx) / kKernelOffsetCount;
-
- for (int i = 0; i < kKernelSize; ++i) {
- // Compute the sinc with offset.
- double s =
- sinc_scale_factor * M_PI * (i - kKernelSize / 2 - subsample_offset);
- double sinc = (!s ? 1.0 : sin(s) / s) * sinc_scale_factor;
-
- // Compute Blackman window, matching the offset of the sinc().
- double x = (i - subsample_offset) / kKernelSize;
- double window =
- kA0 - kA1 * cos(2.0 * M_PI * x) + kA2 * cos(4.0 * M_PI * x);
-
- // Window the sinc() function and store at the correct offset.
- kernel_storage_.get()[i + offset_idx * kKernelSize] = sinc * window;
- }
- }
-}
-
-void InterleavedSincResampler::QueueBuffer(
- const scoped_refptr<Buffer>& buffer) {
- DCHECK(buffer);
- DCHECK(CanQueueBuffer());
-
- if (!pending_buffers_.empty() && pending_buffers_.back()->IsEndOfStream()) {
- DCHECK(buffer->IsEndOfStream());
- return;
- }
-
- if (!buffer->IsEndOfStream()) {
- frames_queued_ += buffer->GetDataSize() / frame_size_in_bytes_;
- }
-
- pending_buffers_.push(buffer);
-}
-
-bool InterleavedSincResampler::Resample(float* destination, int frames) {
- if (!HasEnoughData(frames)) {
- return false;
- }
-
- int remaining_frames = frames;
-
- // Step (1) -- Prime the input buffer at the start of the input stream.
- if (!buffer_primed_) {
- Read(r0_, kBlockSize + kKernelSize / 2);
- buffer_primed_ = true;
- }
-
- // Step (2) -- Resample!
- while (remaining_frames) {
- while (virtual_source_idx_ < kBlockSize) {
- // |virtual_source_idx_| lies in between two kernel offsets so figure out
- // what they are.
- int source_idx = static_cast<int>(virtual_source_idx_);
- double subsample_remainder = virtual_source_idx_ - source_idx;
-
- double virtual_offset_idx = subsample_remainder * kKernelOffsetCount;
- int offset_idx = static_cast<int>(virtual_offset_idx);
-
- // We'll compute "convolutions" for the two kernels which straddle
- // |virtual_source_idx_|.
- float* k1 = kernel_storage_.get() + offset_idx * kKernelSize;
- float* k2 = k1 + kKernelSize;
-
- // Initialize input pointer based on quantized |virtual_source_idx_|.
- float* input_ptr = r1_ + source_idx * channel_count_;
-
- // Figure out how much to weight each kernel's "convolution".
- double kernel_interpolation_factor = virtual_offset_idx - offset_idx;
- for (int i = 0; i < channel_count_; ++i) {
- *destination++ =
- Convolve(input_ptr + i, k1, k2, kernel_interpolation_factor);
- }
-
- // Advance the virtual index.
- virtual_source_idx_ += io_sample_rate_ratio_;
-
- if (!--remaining_frames) {
- frames_resampled_ += frames;
- return true;
- }
- }
-
- // Wrap back around to the start.
- virtual_source_idx_ -= kBlockSize;
-
- // Step (3) Copy r3_ to r1_ and r4_ to r2_.
- // This wraps the last input frames back to the start of the buffer.
- memcpy(r1_, r3_, frame_size_in_bytes_ * (kKernelSize / 2));
- memcpy(r2_, r4_, frame_size_in_bytes_ * (kKernelSize / 2));
-
- // Step (4)
- // Refresh the buffer with more input.
- Read(r5_, kBlockSize);
- }
-
- NOTREACHED();
- return false;
-}
-
-void InterleavedSincResampler::Flush() {
- virtual_source_idx_ = 0;
- buffer_primed_ = false;
- memset(input_buffer_.get(), 0, frame_size_in_bytes_ * kBufferSize);
- while (!pending_buffers_.empty()) {
- pending_buffers_.pop();
- }
- offset_in_frames_ = 0;
- frames_resampled_ = 0;
- frames_queued_ = 0;
-}
-
-bool InterleavedSincResampler::CanQueueBuffer() const {
- if (pending_buffers_.empty()) {
- return true;
- }
- if (pending_buffers_.back()->IsEndOfStream()) {
- return false;
- }
- return pending_buffers_.size() < kMaximumPendingBuffers;
-}
-
-bool InterleavedSincResampler::ReachedEOS() const {
- if (pending_buffers_.empty() || !pending_buffers_.back()->IsEndOfStream()) {
- return false;
- }
- return frames_resampled_ * io_sample_rate_ratio_ >= frames_queued_;
-}
-
-bool InterleavedSincResampler::HasEnoughData(int frames_to_resample) const {
- // Always return true if EOS is seen, as in this case we will just fill 0.
- if (!pending_buffers_.empty() && pending_buffers_.back()->IsEndOfStream()) {
- return true;
- }
-
-  // We have to discount frames_queued_ because Read()s are always done in
-  // blocks of kBlockSize or kBufferSize, so we have to ensure that there is
-  // enough buffered data for an extra Read().
- return (frames_resampled_ + frames_to_resample) * io_sample_rate_ratio_ <
- frames_queued_ - kBufferSize;
-}
-
-void InterleavedSincResampler::Read(float* destination, int frames) {
- while (frames > 0 && !pending_buffers_.empty()) {
- scoped_refptr<Buffer> buffer = pending_buffers_.front();
- if (buffer->IsEndOfStream()) {
-      // Zero-fill the buffer after EOS has been reached.
- memset(destination, 0, frame_size_in_bytes_ * frames);
- return;
- }
- // Copy the data over.
- int frames_in_buffer = buffer->GetDataSize() / frame_size_in_bytes_;
- int frames_to_copy = std::min(frames_in_buffer - offset_in_frames_, frames);
- const uint8* source = buffer->GetData();
- source += frame_size_in_bytes_ * offset_in_frames_;
- memcpy(destination, source, frame_size_in_bytes_ * frames_to_copy);
- offset_in_frames_ += frames_to_copy;
- // Pop the first buffer if all its content has been read.
- if (offset_in_frames_ == frames_in_buffer) {
- offset_in_frames_ = 0;
- pending_buffers_.pop();
- }
- frames -= frames_to_copy;
- destination += frames_to_copy * channel_count_;
- }
-
- // Read should always be satisfied as otherwise Resample should return false
- // to the caller directly.
- DCHECK_EQ(frames, 0);
-}
-
-float InterleavedSincResampler::Convolve(const float* input_ptr,
- const float* k1,
- const float* k2,
- double kernel_interpolation_factor) {
- float sum1 = 0;
- float sum2 = 0;
-
- // Generate a single output sample. Unrolling this loop hurt performance in
- // local testing.
- int n = kKernelSize;
- while (n--) {
- sum1 += *input_ptr * *k1++;
- sum2 += *input_ptr * *k2++;
- input_ptr += channel_count_;
- }
-
- // Linearly interpolate the two "convolutions".
- return (1.0 - kernel_interpolation_factor) * sum1 +
- kernel_interpolation_factor * sum2;
-}
-
-} // namespace media
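The kernel construction in InitializeKernel() above can be reproduced in isolation. The following standalone sketch builds one windowed-sinc kernel for a single sub-sample offset using the same Blackman-window constants and cutoff adjustment; it is an illustration of the math, not the resampler itself.

// Standalone sketch of a Blackman-windowed sinc kernel, one sub-sample offset.
#include <cmath>
#include <vector>

std::vector<double> MakeWindowedSincKernel(double io_sample_rate_ratio,
                                           double subsample_offset,
                                           int kernel_size = 32) {
  // Blackman window parameters (alpha = 0.16), as in InitializeKernel().
  const double kAlpha = 0.16;
  const double kA0 = 0.5 * (1.0 - kAlpha);
  const double kA1 = 0.5;
  const double kA2 = 0.5 * kAlpha;

  // Normalized cutoff, pulled down slightly (x0.9) to reduce aliasing.
  const double sinc_scale =
      (io_sample_rate_ratio > 1.0 ? 1.0 / io_sample_rate_ratio : 1.0) * 0.9;

  std::vector<double> kernel(kernel_size);
  for (int i = 0; i < kernel_size; ++i) {
    const double s =
        sinc_scale * M_PI * (i - kernel_size / 2 - subsample_offset);
    const double sinc = (s == 0.0 ? 1.0 : std::sin(s) / s) * sinc_scale;
    const double x = (i - subsample_offset) / kernel_size;
    const double window = kA0 - kA1 * std::cos(2.0 * M_PI * x) +
                          kA2 * std::cos(4.0 * M_PI * x);
    kernel[i] = sinc * window;  // Window the sinc and store.
  }
  return kernel;
}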
diff --git a/src/media/base/interleaved_sinc_resampler.h b/src/media/base/interleaved_sinc_resampler.h
deleted file mode 100644
index edfdada..0000000
--- a/src/media/base/interleaved_sinc_resampler.h
+++ /dev/null
@@ -1,104 +0,0 @@
-// Copyright (c) 2015 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef MEDIA_BASE_INTERLEAVED_SINC_RESAMPLER_H_
-#define MEDIA_BASE_INTERLEAVED_SINC_RESAMPLER_H_
-
-#include <queue>
-
-#include "base/memory/aligned_memory.h"
-#include "base/memory/ref_counted.h"
-#include "base/memory/scoped_ptr.h"
-#include "media/base/buffers.h"
-#include "media/base/media_export.h"
-
-namespace media {
-
-// InterleavedSincResampler is a high-quality interleaved multi-channel
-// sample-rate converter operating on float samples. It uses the same
-// algorithm as SincResampler but, unlike SincResampler, it works in push
-// mode instead of pull mode.
-class MEDIA_EXPORT InterleavedSincResampler {
- public:
- // |io_sample_rate_ratio| is the ratio of input / output sample rates.
- // |channel_count| is the number of channels in the interleaved audio stream.
- InterleavedSincResampler(double io_sample_rate_ratio, int channel_count);
-
-  // Append a buffer to the queue. The samples in the buffer have to be floats.
- void QueueBuffer(const scoped_refptr<Buffer>& buffer);
-
- // Resample |frames| of data from enqueued buffers. Return false if no sample
- // is read. Return true if all requested samples have been written into
- // |destination|. It will never do a partial read. After the stream reaches
-  // the end, the function will fill the rest of the buffer with 0.
- bool Resample(float* destination, int frames);
-
- // Flush all buffered data and reset internal indices.
- void Flush();
-
- // Return false if we shouldn't queue more buffers to the resampler.
- bool CanQueueBuffer() const;
-
-  // Returns true when we start to return zero-filled data because of EOS.
- bool ReachedEOS() const;
-
- private:
- void InitializeKernel();
- bool HasEnoughData(int frames_to_resample) const;
- void Read(float* destination, int frames);
-
- float Convolve(const float* input_ptr,
- const float* k1,
- const float* k2,
- double kernel_interpolation_factor);
-
- // The ratio of input / output sample rates.
- double io_sample_rate_ratio_;
-
- // An index on the source input buffer with sub-sample precision. It must be
- // double precision to avoid drift.
- double virtual_source_idx_;
-
- // The buffer is primed once at the very beginning of processing.
- bool buffer_primed_;
-
- // Number of audio channels.
- int channel_count_;
-
-  // The size in bytes of an audio frame.
- const int frame_size_in_bytes_;
-
-  // Contains kKernelOffsetCount + 1 kernels back-to-back, each of size
-  // kKernelSize. The kernel offsets are sub-sample shifts of a windowed sinc
-  // shifted from 0.0 to 1.0 sample.
- scoped_ptr_malloc<float, base::ScopedPtrAlignedFree> kernel_storage_;
-
- // Data from the source is copied into this buffer for each processing pass.
- scoped_ptr_malloc<float, base::ScopedPtrAlignedFree> input_buffer_;
-
- // A queue of buffers to be resampled.
- std::queue<scoped_refptr<Buffer> > pending_buffers_;
-
- // The current offset to read when reading from the first pending buffer.
- int offset_in_frames_;
-
-  // The following two variables are used to detect EOS and in HasEnoughData().
- int frames_resampled_;
- int frames_queued_;
-
- // Pointers to the various regions inside |input_buffer_|. See the diagram at
- // the top of the .cc file for more information.
- float* const r0_;
- float* const r1_;
- float* const r2_;
- float* const r3_;
- float* const r4_;
- float* const r5_;
-
- DISALLOW_COPY_AND_ASSIGN(InterleavedSincResampler);
-};
-
-} // namespace media
-
-#endif // MEDIA_BASE_INTERLEAVED_SINC_RESAMPLER_H_
diff --git a/src/media/base/interleaved_sinc_resampler_unittest.cc b/src/media/base/interleaved_sinc_resampler_unittest.cc
deleted file mode 100644
index a8f2e15..0000000
--- a/src/media/base/interleaved_sinc_resampler_unittest.cc
+++ /dev/null
@@ -1,268 +0,0 @@
-/*
- * Copyright 2015 Google Inc. All Rights Reserved.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include <math.h>
-
-#include <algorithm>
-#include <vector>
-
-#include "base/bind.h"
-#include "base/logging.h"
-#include "media/base/interleaved_sinc_resampler.h"
-#include "media/base/sinc_resampler.h"
-#include "testing/gtest/include/gtest/gtest.h"
-
-namespace media {
-
-namespace {
-
-// Used to compare whether two samples are effectively the same, because the
-// resampled results from SincResampler and InterleavedSincResampler can be
-// slightly different as the former may use the SSE Convolve function.
-const float kEpsilon = 0.0001f;
-
-bool AreSamplesSame(float sample1, float sample2) {
- return fabs(sample1 - sample2) < kEpsilon;
-}
-
-// Function to provide the audio data of a single channel indicated by
-// |channel_index| inside a multi channel audio stream to SincResampler.
-void ReadCB(const float* source,
- const int source_size,
- int channel_index,
- int channel_count,
- int* offset,
- float* destination,
- int samples) {
- int samples_to_copy = std::min(source_size - *offset, samples);
- samples -= samples_to_copy;
-
- while (samples_to_copy != 0) {
- *destination++ = source[*offset * channel_count + channel_index];
- --samples_to_copy;
- ++*offset;
- }
- if (samples != 0) {
- memset(destination, 0, sizeof(float) * samples);
- }
-}
-
-class TestBuffer : public Buffer {
- public:
- TestBuffer(const void* data, int data_size)
- : Buffer(base::TimeDelta(), base::TimeDelta()),
- data_(static_cast<const uint8*>(data)),
- data_size_(data_size) {}
-
- const uint8* GetData() const OVERRIDE { return data_; }
-
- int GetDataSize() const OVERRIDE { return data_size_; }
-
- private:
- const uint8* data_;
- int data_size_;
-};
-
-} // namespace
-
-TEST(InterleavedSincResamplerTest, InitialState) {
- InterleavedSincResampler interleaved_resampler(1, 1);
- float output[1];
-
- ASSERT_FALSE(interleaved_resampler.ReachedEOS());
- ASSERT_TRUE(interleaved_resampler.CanQueueBuffer());
- ASSERT_FALSE(interleaved_resampler.Resample(output, 1));
-}
-
-TEST(InterleavedSincResamplerTest, Read) {
- const int kInputFrames = 1024;
- const int kOutputFrames = kInputFrames * 2;
- float samples[kInputFrames] = {0.0};
- float output[kOutputFrames];
-
- InterleavedSincResampler interleaved_resampler(
- static_cast<double>(kInputFrames) / kOutputFrames, 1);
-
- interleaved_resampler.QueueBuffer(new TestBuffer(samples, sizeof(samples)));
- ASSERT_FALSE(interleaved_resampler.Resample(output, kOutputFrames + 1));
-
- while (interleaved_resampler.CanQueueBuffer()) {
- interleaved_resampler.QueueBuffer(new TestBuffer(samples, sizeof(samples)));
- }
-
- // There is really no guarantee that we can read more.
- ASSERT_TRUE(interleaved_resampler.Resample(output, 1));
-}
-
-TEST(InterleavedSincResamplerTest, ReachedEOS) {
- const int kInputFrames = 512 * 3 + 32;
- const int kOutputFrames = kInputFrames * 2;
- float input[kInputFrames] = {0.0};
-
- InterleavedSincResampler interleaved_resampler(
- static_cast<double>(kInputFrames) / kOutputFrames, 1);
-
- interleaved_resampler.QueueBuffer(new TestBuffer(input, sizeof(input)));
- interleaved_resampler.QueueBuffer(new TestBuffer(NULL, 0)); // EOS
-
- ASSERT_FALSE(interleaved_resampler.ReachedEOS());
-
- float output[kOutputFrames];
-
- ASSERT_TRUE(interleaved_resampler.Resample(output, kOutputFrames - 4));
- ASSERT_FALSE(interleaved_resampler.ReachedEOS());
-
- ASSERT_TRUE(interleaved_resampler.Resample(output, 4));
- ASSERT_TRUE(interleaved_resampler.ReachedEOS());
-}
-
-// As InterleavedSincResampler is just the interleaved version of SincResampler,
-// the following unit tests just try to verify that the results of using
-// InterleavedSincResampler are the same as using SincResampler on individual
-// channel.
-TEST(InterleavedSincResamplerTest, ResampleSingleChannel) {
- const int kInputFrames = 1719;
-  // Read out twice the number of frames to ensure we saturate the input frames.
- const int kOutputFrames = kInputFrames * 2;
- const double kResampleRatio = 44100. / 48000.;
- float input[kInputFrames];
-
-  // Fill the samples.
- for (int i = 0; i < kInputFrames; ++i) {
- input[i] = i / static_cast<float>(kInputFrames);
- }
-
- int offset = 0;
- SincResampler sinc_resampler(
- kResampleRatio, base::Bind(ReadCB, input, kInputFrames, 0, 1, &offset));
- InterleavedSincResampler interleaved_resampler(kResampleRatio, 1);
-
- interleaved_resampler.QueueBuffer(new TestBuffer(input, sizeof(input)));
- interleaved_resampler.QueueBuffer(new TestBuffer(NULL, 0)); // EOS
-
- float non_interleaved_output[kOutputFrames];
- float interleaved_output[kOutputFrames];
-
- sinc_resampler.Resample(non_interleaved_output, kOutputFrames);
- ASSERT_TRUE(
- interleaved_resampler.Resample(interleaved_output, kOutputFrames));
-
- for (int i = 0; i < kOutputFrames; ++i) {
- ASSERT_TRUE(
- AreSamplesSame(non_interleaved_output[i], interleaved_output[i]));
- }
-}
-
-TEST(InterleavedSincResamplerTest, ResampleMultipleChannels) {
- const int kChannelCount = 3;
- const int kInputFrames = 8737;
-  // Read out twice the number of frames to ensure we saturate the input frames.
- const int kOutputFrames = kInputFrames * 2;
- const double kResampleRatio = 44100. / 48000.;
- float input[kInputFrames * kChannelCount];
-
-  // Fill the buffer with different samples per frame on different channels.
- for (int i = 0; i < kInputFrames * kChannelCount; ++i) {
- input[i] = i / static_cast<float>(kInputFrames * kChannelCount);
- }
-
- float non_interleaved_outputs[kChannelCount][kInputFrames * 2];
-
- for (int i = 0; i < kChannelCount; ++i) {
- int offset = 0;
- SincResampler sinc_resampler(
- kResampleRatio,
- base::Bind(ReadCB, input, kInputFrames, i, kChannelCount, &offset));
- sinc_resampler.Resample(non_interleaved_outputs[i], kOutputFrames);
- }
-
- InterleavedSincResampler interleaved_resampler(kResampleRatio, kChannelCount);
- interleaved_resampler.QueueBuffer(new TestBuffer(input, sizeof(input)));
- interleaved_resampler.QueueBuffer(new TestBuffer(NULL, 0)); // EOS
-
- float interleaved_output[kOutputFrames * kChannelCount];
-
- ASSERT_TRUE(
- interleaved_resampler.Resample(interleaved_output, kOutputFrames));
-
- for (int i = 0; i < kOutputFrames; ++i) {
- for (int channel = 0; channel < kChannelCount; ++channel) {
- ASSERT_TRUE(
- AreSamplesSame(non_interleaved_outputs[channel][i],
- interleaved_output[i * kChannelCount + channel]));
- }
- }
-}
-
-TEST(InterleavedSincResamplerTest, Benchmark) {
- const int kChannelCount = 8;
- const int kInputFrames = 44100;
- const int kNumberOfIterations = 100;
- const int kOutputFrames = kInputFrames * 2;
- const double kResampleRatio = 44100. / 48000.;
- std::vector<float> input(kInputFrames * kChannelCount);
-
-  // Fill the buffer with different samples per frame on different channels.
- for (int i = 0; i < kInputFrames * kChannelCount; ++i) {
- input[i] = i / static_cast<float>(kInputFrames * kChannelCount);
- }
-
- InterleavedSincResampler interleaved_resampler(kResampleRatio, kChannelCount);
-
- base::TimeTicks start = base::TimeTicks::HighResNow();
- std::vector<float> interleaved_output(kOutputFrames * kChannelCount);
- int total_output_frames = 0;
-
- for (int i = 0; i < kNumberOfIterations; ++i) {
- interleaved_resampler.QueueBuffer(
- new TestBuffer(&input[0], sizeof(float) * input.size()));
- if (interleaved_resampler.Resample(&interleaved_output[0], kOutputFrames)) {
- total_output_frames += kOutputFrames;
- }
- }
-
- double total_time_c_ms =
- (base::TimeTicks::HighResNow() - start).InMillisecondsF();
- printf(
- "Benchmarking InterleavedSincResampler in %d channels for %d "
- "iterations took %.4gms.\n:\n",
- kChannelCount, kNumberOfIterations, total_time_c_ms);
-
- start = base::TimeTicks::HighResNow();
-
- int offset = 0;
- SincResampler sinc_resampler(
- kResampleRatio,
- base::Bind(ReadCB, &input[0], kInputFrames, 0, 1, &offset));
-
- while (total_output_frames > 0) {
- sinc_resampler.Resample(&interleaved_output[0], kOutputFrames);
- total_output_frames -= kOutputFrames;
-    // Set offset to 0 so we never reach EOS, forcing a sample-by-sample copy
-    // for every frame, as previously we had to convert the interleaved stream
-    // to a non-interleaved stream to use MultiChannelResampler and then
-    // convert the result back to an interleaved stream.
- offset = 0;
- }
-
- total_time_c_ms = (base::TimeTicks::HighResNow() - start).InMillisecondsF();
- printf(
- "Benchmarking SincResampler with one channel for %d iterations took "
- "%.4gms.\n:\n",
- kNumberOfIterations, total_time_c_ms);
-}
-
-} // namespace media
diff --git a/src/media/base/limits.h b/src/media/base/limits.h
deleted file mode 100644
index 3eff5f4..0000000
--- a/src/media/base/limits.h
+++ /dev/null
@@ -1,50 +0,0 @@
-// Copyright (c) 2011 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// Contains limit definition constants for the media subsystem.
-
-#ifndef MEDIA_BASE_LIMITS_H_
-#define MEDIA_BASE_LIMITS_H_
-
-#include "base/basictypes.h"
-
-namespace media {
-
-namespace limits {
-
-enum {
- // Maximum possible dimension (width or height) for any video.
- kMaxDimension = (1 << 15) - 1, // 32767
-
- // Maximum possible canvas size (width multiplied by height) for any video.
- kMaxCanvas = (1 << (14 * 2)), // 16384 x 16384
-
-  // Total number of video frames which are populating the pipeline.
- kMaxVideoFrames = 4,
-
- // The following limits are used by AudioParameters::IsValid().
- //
- // A few notes on sample rates of common formats:
- // - AAC files are limited to 96 kHz.
- // - MP3 files are limited to 48 kHz.
-  //   - Vorbis used to be limited to 96 kHz, but no longer has that
-  //     restriction.
-  //   - Most PC audio hardware is limited to 192 kHz.
- kMaxSampleRate = 192000,
- kMinSampleRate = 3000,
- kMaxChannels = 32,
- kMaxBitsPerSample = 64,
- kMaxSamplesPerPacket = kMaxSampleRate,
- kMaxPacketSizeInBytes =
- (kMaxBitsPerSample / 8) * kMaxChannels * kMaxSamplesPerPacket,
-
- // This limit is used by ParamTraits<VideoCaptureParams>.
- kMaxFramesPerSecond = 1000,
-};
-
-} // namespace limits
-
-} // namespace media
-
-#endif // MEDIA_BASE_LIMITS_H_
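As a quick check of the derived constant above: kMaxPacketSizeInBytes expands to (64 / 8) * 32 * 192000 = 49,152,000 bytes, roughly 47 MiB. A one-line compile-time assertion of that arithmetic:

// Worked example of the packet-size ceiling defined in limits.h above.
static_assert((64 / 8) * 32 * 192000 == 49152000,
              "kMaxPacketSizeInBytes = 8 bytes * 32 channels * 192000 samples");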
diff --git a/src/media/base/media.h b/src/media/base/media.h
deleted file mode 100644
index 277c740..0000000
--- a/src/media/base/media.h
+++ /dev/null
@@ -1,41 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// Contains code that should be used for initializing, or querying the state
-// of the media library as a whole.
-
-#ifndef MEDIA_BASE_MEDIA_H_
-#define MEDIA_BASE_MEDIA_H_
-
-#include "media/base/media_export.h"
-
-class FilePath;
-
-namespace media {
-
-// Attempts to initialize the media library (loading DLLs, DSOs, etc.).
-//
-// If |module_dir| is the empty string, then the system default library paths
-// are searched for the dynamic libraries. If a |module_dir| is provided, then
-// only the specified |module_dir| will be searched for the dynamic libraries.
-//
-// If multiple initializations are attempted with different |module_dir|s
-// specified then the first one to succeed remains effective for the lifetime
-// of the process.
-//
-// Returns true if everything was successfully initialized, false otherwise.
-MEDIA_EXPORT bool InitializeMediaLibrary(const FilePath& module_dir);
-
-// Helper function for unit tests to avoid boilerplate code everywhere. This
-// function will crash if it fails to load the media library. This ensures tests
-// fail if the media library is not available.
-MEDIA_EXPORT void InitializeMediaLibraryForTesting();
-
-// Use this if you need to check whether the media library is initialized
-// for this process, without actually trying to initialize it.
-MEDIA_EXPORT bool IsMediaLibraryInitialized();
-
-} // namespace media
-
-#endif // MEDIA_BASE_MEDIA_H_
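A hypothetical caller-side sketch, assuming the media and base targets are linked: initialize the library from the executable's directory, mirroring what InitializeMediaLibraryForTesting() does internally but returning the result instead of crashing.

#include "base/file_path.h"
#include "base/path_service.h"
#include "media/base/media.h"

bool InitializeMediaFromExeDir() {
  FilePath module_dir;
  if (!PathService::Get(base::DIR_EXE, &module_dir))
    return false;
  // With a non-empty |module_dir|, only that directory is searched for the
  // dynamic media libraries.
  return media::InitializeMediaLibrary(module_dir);
}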
diff --git a/src/media/base/media_export.h b/src/media/base/media_export.h
deleted file mode 100644
index 44ccef7..0000000
--- a/src/media/base/media_export.h
+++ /dev/null
@@ -1,32 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef MEDIA_BASE_MEDIA_EXPORT_H_
-#define MEDIA_BASE_MEDIA_EXPORT_H_
-
-// Define MEDIA_EXPORT so that functionality implemented by the Media module
-// can be exported to consumers.
-
-#if defined(COMPONENT_BUILD)
-#if defined(_MSC_VER)
-
-#if defined(MEDIA_IMPLEMENTATION)
-#define MEDIA_EXPORT __declspec(dllexport)
-#else
-#define MEDIA_EXPORT __declspec(dllimport)
-#endif // defined(MEDIA_IMPLEMENTATION)
-
-#else // defined(WIN32)
-#if defined(MEDIA_IMPLEMENTATION)
-#define MEDIA_EXPORT __attribute__((visibility("default")))
-#else
-#define MEDIA_EXPORT
-#endif
-#endif
-
-#else // defined(COMPONENT_BUILD)
-#define MEDIA_EXPORT
-#endif
-
-#endif // MEDIA_BASE_MEDIA_EXPORT_H_
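Typical usage of the macro above; the class and function here are hypothetical and exist only to show the pattern. Anything that consumers of the media component must see gets the annotation, which expands to dllexport/dllimport under MSVC component builds and to default symbol visibility elsewhere.

#include "media/base/media_export.h"

// Hypothetical exported declarations, for illustration only.
class MEDIA_EXPORT HypotheticalDecoder {
 public:
  void Decode();
};

MEDIA_EXPORT bool HypotheticalQueryCapability();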
diff --git a/src/media/base/media_log.cc b/src/media/base/media_log.cc
deleted file mode 100644
index eb8ebbb..0000000
--- a/src/media/base/media_log.cc
+++ /dev/null
@@ -1,199 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "media/base/media_log.h"
-
-#include <string>
-
-#include "base/atomic_sequence_num.h"
-#include "base/logging.h"
-#include "base/values.h"
-
-namespace media {
-
-// A count of all MediaLogs created in the current process. Used to generate
-// unique IDs.
-static base::StaticAtomicSequenceNumber g_media_log_count;
-
-const char* MediaLog::EventTypeToString(MediaLogEvent::Type type) {
- switch (type) {
- case MediaLogEvent::WEBMEDIAPLAYER_CREATED:
- return "WEBMEDIAPLAYER_CREATED";
- case MediaLogEvent::WEBMEDIAPLAYER_DESTROYED:
- return "WEBMEDIAPLAYER_DESTROYED";
- case MediaLogEvent::PIPELINE_CREATED:
- return "PIPELINE_CREATED";
- case MediaLogEvent::PIPELINE_DESTROYED:
- return "PIPELINE_DESTROYED";
- case MediaLogEvent::LOAD:
- return "LOAD";
- case MediaLogEvent::SEEK:
- return "SEEK";
- case MediaLogEvent::PLAY:
- return "PLAY";
- case MediaLogEvent::PAUSE:
- return "PAUSE";
- case MediaLogEvent::PIPELINE_STATE_CHANGED:
- return "PIPELINE_STATE_CHANGED";
- case MediaLogEvent::PIPELINE_ERROR:
- return "PIPELINE_ERROR";
- case MediaLogEvent::VIDEO_SIZE_SET:
- return "VIDEO_SIZE_SET";
- case MediaLogEvent::DURATION_SET:
- return "DURATION_SET";
- case MediaLogEvent::TOTAL_BYTES_SET:
- return "TOTAL_BYTES_SET";
- case MediaLogEvent::NETWORK_ACTIVITY_SET:
- return "NETWORK_ACTIVITY_SET";
- case MediaLogEvent::AUDIO_ENDED:
- return "AUDIO_ENDED";
- case MediaLogEvent::VIDEO_ENDED:
- return "VIDEO_ENDED";
- case MediaLogEvent::AUDIO_RENDERER_DISABLED:
- return "AUDIO_RENDERER_DISABLED";
- case MediaLogEvent::BUFFERED_EXTENTS_CHANGED:
- return "BUFFERED_EXTENTS_CHANGED";
- case MediaLogEvent::MEDIA_SOURCE_ERROR:
- return "MEDIA_SOURCE_ERROR";
- }
- NOTREACHED();
- return NULL;
-}
-
-const char* MediaLog::PipelineStatusToString(PipelineStatus status) {
- switch (status) {
- case PIPELINE_OK:
- return "pipeline: ok";
- case PIPELINE_ERROR_URL_NOT_FOUND:
- return "pipeline: url not found";
- case PIPELINE_ERROR_NETWORK:
- return "pipeline: network error";
- case PIPELINE_ERROR_DECODE:
- return "pipeline: decode error";
- case PIPELINE_ERROR_DECRYPT:
- return "pipeline: decrypt error";
- case PIPELINE_ERROR_ABORT:
- return "pipeline: abort";
- case PIPELINE_ERROR_INITIALIZATION_FAILED:
- return "pipeline: initialization failed";
- case PIPELINE_ERROR_COULD_NOT_RENDER:
- return "pipeline: could not render";
- case PIPELINE_ERROR_READ:
- return "pipeline: read error";
- case PIPELINE_ERROR_OPERATION_PENDING:
- return "pipeline: operation pending";
- case PIPELINE_ERROR_INVALID_STATE:
- return "pipeline: invalid state";
- case DEMUXER_ERROR_COULD_NOT_OPEN:
- return "demuxer: could not open";
- case DEMUXER_ERROR_COULD_NOT_PARSE:
-      return "demuxer: could not parse";
- case DEMUXER_ERROR_NO_SUPPORTED_STREAMS:
- return "demuxer: no supported streams";
- case DECODER_ERROR_NOT_SUPPORTED:
- return "decoder: not supported";
- case PIPELINE_STATUS_MAX:
- NOTREACHED();
- }
- NOTREACHED();
- return NULL;
-}
-
-LogHelper::LogHelper(const LogCB& log_cb) : log_cb_(log_cb) {}
-
-LogHelper::~LogHelper() {
- if (log_cb_.is_null())
- return;
- log_cb_.Run(stream_.str());
-}
-
-MediaLog::MediaLog() : id_(g_media_log_count.GetNext()) {}
-
-MediaLog::~MediaLog() {}
-
-void MediaLog::AddEvent(scoped_ptr<MediaLogEvent> event) {}
-
-scoped_ptr<MediaLogEvent> MediaLog::CreateEvent(MediaLogEvent::Type type) {
- scoped_ptr<MediaLogEvent> event(new MediaLogEvent);
- event->id = id_;
- event->type = type;
- event->time = base::Time::Now();
- return event.Pass();
-}
-
-scoped_ptr<MediaLogEvent> MediaLog::CreateBooleanEvent(
- MediaLogEvent::Type type, const char* property, bool value) {
- scoped_ptr<MediaLogEvent> event(CreateEvent(type));
- event->params.SetBoolean(property, value);
- return event.Pass();
-}
-
-scoped_ptr<MediaLogEvent> MediaLog::CreateStringEvent(
- MediaLogEvent::Type type, const char* property, const std::string& value) {
- scoped_ptr<MediaLogEvent> event(CreateEvent(type));
- event->params.SetString(property, value);
- return event.Pass();
-}
-
-scoped_ptr<MediaLogEvent> MediaLog::CreateTimeEvent(
- MediaLogEvent::Type type, const char* property, base::TimeDelta value) {
- scoped_ptr<MediaLogEvent> event(CreateEvent(type));
- event->params.SetDouble(property, value.InSecondsF());
- return event.Pass();
-}
-
-scoped_ptr<MediaLogEvent> MediaLog::CreateLoadEvent(const std::string& url) {
- scoped_ptr<MediaLogEvent> event(CreateEvent(MediaLogEvent::LOAD));
- event->params.SetString("url", url);
- return event.Pass();
-}
-
-scoped_ptr<MediaLogEvent> MediaLog::CreateSeekEvent(float seconds) {
- scoped_ptr<MediaLogEvent> event(CreateEvent(MediaLogEvent::SEEK));
- event->params.SetDouble("seek_target", seconds);
- return event.Pass();
-}
-
-scoped_ptr<MediaLogEvent> MediaLog::CreatePipelineStateChangedEvent(
- const std::string& state) {
- scoped_ptr<MediaLogEvent> event(
- CreateEvent(MediaLogEvent::PIPELINE_STATE_CHANGED));
- event->params.SetString("pipeline_state", state);
- return event.Pass();
-}
-
-scoped_ptr<MediaLogEvent> MediaLog::CreatePipelineErrorEvent(
- PipelineStatus error) {
- scoped_ptr<MediaLogEvent> event(CreateEvent(MediaLogEvent::PIPELINE_ERROR));
- event->params.SetString("pipeline_error", PipelineStatusToString(error));
- return event.Pass();
-}
-
-scoped_ptr<MediaLogEvent> MediaLog::CreateVideoSizeSetEvent(
- size_t width, size_t height) {
- scoped_ptr<MediaLogEvent> event(CreateEvent(MediaLogEvent::VIDEO_SIZE_SET));
- event->params.SetInteger("width", width);
- event->params.SetInteger("height", height);
- return event.Pass();
-}
-
-scoped_ptr<MediaLogEvent> MediaLog::CreateBufferedExtentsChangedEvent(
- size_t start, size_t current, size_t end) {
- scoped_ptr<MediaLogEvent> event(
- CreateEvent(MediaLogEvent::BUFFERED_EXTENTS_CHANGED));
- event->params.SetInteger("buffer_start", start);
- event->params.SetInteger("buffer_current", current);
- event->params.SetInteger("buffer_end", end);
- return event.Pass();
-}
-
-scoped_ptr<MediaLogEvent> MediaLog::CreateMediaSourceErrorEvent(
- const std::string& error) {
- scoped_ptr<MediaLogEvent> event(
- CreateEvent(MediaLogEvent::MEDIA_SOURCE_ERROR));
- event->params.SetString("error", error);
- return event.Pass();
-}
-
-} // namespace media
diff --git a/src/media/base/media_log.h b/src/media/base/media_log.h
deleted file mode 100644
index a3ec7ed..0000000
--- a/src/media/base/media_log.h
+++ /dev/null
@@ -1,83 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef MEDIA_BASE_MEDIA_LOG_H_
-#define MEDIA_BASE_MEDIA_LOG_H_
-
-#include <sstream>
-#include <string>
-
-#include "base/memory/ref_counted.h"
-#include "base/memory/scoped_ptr.h"
-#include "media/base/media_export.h"
-#include "media/base/media_log_event.h"
-#include "media/base/pipeline_status.h"
-
-namespace media {
-
-// Indicates a string should be added to the log.
-// First parameter - The string to add to the log.
-typedef base::Callback<void(const std::string&)> LogCB;
-
-// Helper class to make it easier to use log_cb like DVLOG().
-class LogHelper {
- public:
-  LogHelper(const LogCB& log_cb);
- ~LogHelper();
-
- std::ostream& stream() { return stream_; }
-
- private:
- LogCB log_cb_;
- std::stringstream stream_;
-};
-
-#define MEDIA_LOG(log_cb) LogHelper(log_cb).stream()
-
-class MEDIA_EXPORT MediaLog : public base::RefCountedThreadSafe<MediaLog> {
- public:
- // Convert various enums to strings.
- static const char* EventTypeToString(MediaLogEvent::Type type);
- static const char* PipelineStatusToString(PipelineStatus);
-
- MediaLog();
-
-  // Add an event to this log. Overridden by inheritors to actually do something
- // with it.
- virtual void AddEvent(scoped_ptr<MediaLogEvent> event);
-
- // Helper methods to create events and their parameters.
- scoped_ptr<MediaLogEvent> CreateEvent(MediaLogEvent::Type type);
- scoped_ptr<MediaLogEvent> CreateBooleanEvent(
- MediaLogEvent::Type type, const char* property, bool value);
- scoped_ptr<MediaLogEvent> CreateStringEvent(
- MediaLogEvent::Type type, const char* property, const std::string& value);
- scoped_ptr<MediaLogEvent> CreateTimeEvent(
- MediaLogEvent::Type type, const char* property, base::TimeDelta value);
- scoped_ptr<MediaLogEvent> CreateLoadEvent(const std::string& url);
- scoped_ptr<MediaLogEvent> CreateSeekEvent(float seconds);
- scoped_ptr<MediaLogEvent> CreatePipelineStateChangedEvent(
- const std::string& state);
- scoped_ptr<MediaLogEvent> CreatePipelineErrorEvent(PipelineStatus error);
- scoped_ptr<MediaLogEvent> CreateVideoSizeSetEvent(
- size_t width, size_t height);
- scoped_ptr<MediaLogEvent> CreateBufferedExtentsChangedEvent(
- size_t start, size_t current, size_t end);
- scoped_ptr<MediaLogEvent> CreateMediaSourceErrorEvent(
- const std::string& error);
-
- protected:
- friend class base::RefCountedThreadSafe<MediaLog>;
- virtual ~MediaLog();
-
- private:
- // A unique (to this process) id for this MediaLog.
- int32 id_;
-
- DISALLOW_COPY_AND_ASSIGN(MediaLog);
-};
-
-} // namespace media
-
-#endif // MEDIA_BASE_MEDIA_LOG_H_
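A hypothetical call site for the helpers above, assuming the media target: a Create*Event() helper builds a MediaLogEvent that is handed back to AddEvent(), while MEDIA_LOG() streams a free-form string through a LogCB.

#include "media/base/media_log.h"

void ReportSeek(const scoped_refptr<media::MediaLog>& media_log,
                const media::LogCB& log_cb,
                float seconds) {
  scoped_ptr<media::MediaLogEvent> event = media_log->CreateSeekEvent(seconds);
  media_log->AddEvent(event.Pass());
  MEDIA_LOG(log_cb) << "Seeking to " << seconds << " seconds";
}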
diff --git a/src/media/base/media_log_event.h b/src/media/base/media_log_event.h
deleted file mode 100644
index 9b0f6e1..0000000
--- a/src/media/base/media_log_event.h
+++ /dev/null
@@ -1,85 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef MEDIA_BASE_MEDIA_LOG_EVENT_H_
-#define MEDIA_BASE_MEDIA_LOG_EVENT_H_
-
-#include "base/time.h"
-#include "base/values.h"
-
-namespace media {
-
-struct MediaLogEvent {
- enum Type {
- // A WebMediaPlayer is being created or destroyed.
- // params: none.
- WEBMEDIAPLAYER_CREATED,
- WEBMEDIAPLAYER_DESTROYED,
-
- // A Pipeline is being created or destroyed.
- // params: none.
- PIPELINE_CREATED,
- PIPELINE_DESTROYED,
-
- // A media player is loading a resource.
- // params: "url": <URL of the resource>.
- LOAD,
-
- // A media player has started seeking.
- // params: "seek_target": <number of seconds to which to seek>.
- SEEK,
-
- // A media player has been told to play or pause.
- // params: none.
- PLAY,
- PAUSE,
-
- // The state of Pipeline has changed.
- // params: "pipeline_state": <string name of the state>.
- PIPELINE_STATE_CHANGED,
-
- // An error has occurred in the pipeline.
- // params: "pipeline_error": <string name of the error>.
- PIPELINE_ERROR,
-
- // The size of the video has been determined.
- // params: "width": <integral width of the video>.
- // "height": <integral height of the video>.
- VIDEO_SIZE_SET,
-
- // A property of the pipeline has been set by a filter.
-    // These take a single parameter named after the event and of the
-    // appropriate type, e.g. DURATION_SET: "duration" of type TimeDelta.
- DURATION_SET,
- TOTAL_BYTES_SET,
- NETWORK_ACTIVITY_SET,
-
- // Audio/Video stream playback has ended.
- AUDIO_ENDED,
- VIDEO_ENDED,
-
- // The audio renderer has been disabled.
- // params: none.
- AUDIO_RENDERER_DISABLED,
-
- // The extents of the sliding buffer have changed.
- // params: "buffer_start": <first buffered byte>.
- // "buffer_current": <current offset>.
- // "buffer_end": <last buffered byte>.
- BUFFERED_EXTENTS_CHANGED,
-
-    // Errors reported by Media Source Extensions code.
-    // params: "error": Error string describing the error detected.
-    MEDIA_SOURCE_ERROR,
- };
-
- int32 id;
- Type type;
- base::DictionaryValue params;
- base::Time time;
-};
-
-} // namespace media
-
-#endif // MEDIA_BASE_MEDIA_LOG_EVENT_H_
diff --git a/src/media/base/media_posix.cc b/src/media/base/media_posix.cc
deleted file mode 100644
index a6bebaa..0000000
--- a/src/media/base/media_posix.cc
+++ /dev/null
@@ -1,99 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "media/base/media.h"
-
-#include <string>
-
-#include "base/file_path.h"
-#include "base/logging.h"
-#include "base/path_service.h"
-#include "base/stringize_macros.h"
-#include "media/ffmpeg/ffmpeg_common.h"
-
-#if !defined(USE_SYSTEM_FFMPEG)
-#include "third_party/ffmpeg/ffmpeg_stubs.h"
-
-using third_party_ffmpeg::kNumStubModules;
-using third_party_ffmpeg::kModuleFfmpegsumo;
-using third_party_ffmpeg::InitializeStubs;
-using third_party_ffmpeg::StubPathMap;
-#endif // !defined(USE_SYSTEM_FFMPEG)
-
-namespace media {
-
-// Handy to prevent shooting ourselves in the foot with macro wizardry.
-#if !defined(LIBAVCODEC_VERSION_MAJOR) || \
- !defined(LIBAVFORMAT_VERSION_MAJOR) || \
- !defined(LIBAVUTIL_VERSION_MAJOR)
-#error FFmpeg headers not included!
-#endif
-
-#define AVCODEC_VERSION STRINGIZE(LIBAVCODEC_VERSION_MAJOR)
-#define AVFORMAT_VERSION STRINGIZE(LIBAVFORMAT_VERSION_MAJOR)
-#define AVUTIL_VERSION STRINGIZE(LIBAVUTIL_VERSION_MAJOR)
-
-#if defined(OS_MACOSX)
-// TODO(evan): should be using .so like ffmpegsumo here.
-#define DSO_NAME(MODULE, VERSION) ("lib" MODULE "." VERSION ".dylib")
-static const FilePath::CharType kSumoLib[] =
- FILE_PATH_LITERAL("ffmpegsumo.so");
-#elif defined(OS_POSIX)
-#define DSO_NAME(MODULE, VERSION) ("lib" MODULE ".so." VERSION)
-static const FilePath::CharType kSumoLib[] =
- FILE_PATH_LITERAL("libffmpegsumo.so");
-#else
-#error "Do not know how to construct DSO name for this OS."
-#endif
-
-// Use a global to indicate whether the library has been initialized or not. We
-// rely on function level static initialization in InitializeMediaLibrary() to
-// guarantee this is only set once in a thread safe manner.
-static bool g_media_library_is_initialized = false;
-
-static bool InitializeMediaLibraryInternal(const FilePath& module_dir) {
- DCHECK(!g_media_library_is_initialized);
-
-#if defined(USE_SYSTEM_FFMPEG)
- // No initialization is necessary when using system ffmpeg,
- // we just link directly with system ffmpeg libraries.
- g_media_library_is_initialized = true;
-#else
- StubPathMap paths;
-
- // First try to initialize with Chrome's sumo library.
- DCHECK_EQ(kNumStubModules, 1);
- paths[kModuleFfmpegsumo].push_back(module_dir.Append(kSumoLib).value());
-
- // If that fails, see if any system libraries are available.
- paths[kModuleFfmpegsumo].push_back(module_dir.Append(
- FILE_PATH_LITERAL(DSO_NAME("avutil", AVUTIL_VERSION))).value());
- paths[kModuleFfmpegsumo].push_back(module_dir.Append(
- FILE_PATH_LITERAL(DSO_NAME("avcodec", AVCODEC_VERSION))).value());
- paths[kModuleFfmpegsumo].push_back(module_dir.Append(
- FILE_PATH_LITERAL(DSO_NAME("avformat", AVFORMAT_VERSION))).value());
-
- g_media_library_is_initialized = InitializeStubs(paths);
-#endif // !defined(USE_SYSTEM_FFMPEG)
- return g_media_library_is_initialized;
-}
-
-bool InitializeMediaLibrary(const FilePath& base_path) {
- static const bool kMediaLibraryInitialized =
- InitializeMediaLibraryInternal(base_path);
- DCHECK_EQ(kMediaLibraryInitialized, g_media_library_is_initialized);
- return kMediaLibraryInitialized;
-}
-
-void InitializeMediaLibraryForTesting() {
- FilePath file_path;
- CHECK(PathService::Get(base::DIR_EXE, &file_path));
- CHECK(InitializeMediaLibrary(file_path));
-}
-
-bool IsMediaLibraryInitialized() {
- return g_media_library_is_initialized;
-}
-
-} // namespace media
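
The deleted POSIX loader above composes candidate shared-library names out of the FFmpeg version macros (via STRINGIZE and DSO_NAME) and hands them to the generated stub loader. A minimal standalone sketch of that name-construction idea follows; the version number 54 is a placeholder for illustration, not a value taken from this tree.

    // Sketch only: shows how "lib<module>.so.<major>" strings are built at
    // compile time from a version macro, mirroring the deleted DSO_NAME usage.
    #include <cstdio>

    #define MEDIA_STRINGIZE_HELPER(x) #x
    #define MEDIA_STRINGIZE(x) MEDIA_STRINGIZE_HELPER(x)
    #define DSO_NAME(MODULE, VERSION) ("lib" MODULE ".so." VERSION)

    #define LIBAVCODEC_VERSION_MAJOR 54  // placeholder, for illustration only

    int main() {
      // String-literal concatenation yields "libavcodec.so.54".
      std::puts(DSO_NAME("avcodec", MEDIA_STRINGIZE(LIBAVCODEC_VERSION_MAJOR)));
      return 0;
    }
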
diff --git a/src/media/base/media_stub.cc b/src/media/base/media_stub.cc
deleted file mode 100644
index a42bbf9..0000000
--- a/src/media/base/media_stub.cc
+++ /dev/null
@@ -1,24 +0,0 @@
-// Copyright 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "media/base/media.h"
-
-#include "base/logging.h"
-
-// This file is intended for platforms that don't need to load any media
-// libraries (e.g., Android and iOS).
-namespace media {
-
-bool InitializeMediaLibrary(const FilePath& module_dir) {
- return true;
-}
-
-void InitializeMediaLibraryForTesting() {
-}
-
-bool IsMediaLibraryInitialized() {
- return true;
-}
-
-} // namespace media
diff --git a/src/media/base/media_switches.cc b/src/media/base/media_switches.cc
deleted file mode 100644
index 6c020df..0000000
--- a/src/media/base/media_switches.cc
+++ /dev/null
@@ -1,67 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "media/base/media_switches.h"
-
-namespace switches {
-
-// Allow users to specify a custom buffer size for debugging purpose.
-const char kAudioBufferSize[] = "audio-buffer-size";
-
-#if defined(OS_LINUX) || defined(OS_FREEBSD) || defined(OS_SOLARIS)
-// The Alsa device to use when opening an audio stream.
-const char kAlsaOutputDevice[] = "alsa-output-device";
-// The Alsa device to use when opening an audio input stream.
-const char kAlsaInputDevice[] = "alsa-input-device";
-#endif
-
-#if defined(USE_CRAS)
-// Use CRAS, the ChromeOS audio server.
-const char kUseCras[] = "use-cras";
-#endif
-
-#if defined(USE_PULSEAUDIO)
-// Use PulseAudio on platforms that support it.
-const char kUsePulseAudio[] = "use-pulseaudio";
-#endif
-
-#if defined(OS_WIN)
-// Use exclusive mode audio streaming for Windows Vista and higher.
-// Leads to lower latencies for audio streams which use the
-// AudioParameters::AUDIO_PCM_LOW_LATENCY audio path.
-// See http://msdn.microsoft.com/en-us/library/windows/desktop/dd370844(v=vs.85).aspx
-// for details.
-const char kEnableExclusiveAudio[] = "enable-exclusive-audio";
-#endif
-
-// Disable automatic fallback from low latency to high latency path.
-const char kDisableAudioFallback[] = "disable-audio-fallback";
-
-// Disable AudioOutputResampler for automatic audio resampling and rebuffering.
-const char kDisableAudioOutputResampler[] = "disable-audio-output-resampler";
-
-// Controls renderer side mixing and low latency audio path for media elements.
-#if defined(OS_WIN) || defined(OS_MACOSX)
-const char kDisableRendererSideMixing[] = "disable-renderer-side-mixing";
-#else
-const char kEnableRendererSideMixing[] = "enable-renderer-side-mixing";
-#endif
-
-// Enable browser-side audio mixer.
-const char kEnableAudioMixer[] = "enable-audio-mixer";
-
-// Enable live audio input with getUserMedia() and the Web Audio API.
-const char kEnableWebAudioInput[] = "enable-webaudio-input";
-
-// Set number of threads to use for video decoding.
-const char kVideoThreads[] = "video-threads";
-
-// Enables support for encrypted media. Current implementation is
-// incomplete and this flag is used for development and testing.
-const char kEnableEncryptedMedia[] = "enable-encrypted-media";
-
-// Enables Opus playback in media elements.
-const char kEnableOpusPlayback[] = "enable-opus-playback";
-
-} // namespace switches
diff --git a/src/media/base/media_switches.h b/src/media/base/media_switches.h
deleted file mode 100644
index 4005efb..0000000
--- a/src/media/base/media_switches.h
+++ /dev/null
@@ -1,56 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// Defines all the "media" command-line switches.
-
-#ifndef MEDIA_BASE_MEDIA_SWITCHES_H_
-#define MEDIA_BASE_MEDIA_SWITCHES_H_
-
-#include "build/build_config.h"
-#include "media/base/media_export.h"
-
-namespace switches {
-
-#if defined(OS_LINUX) || defined(OS_FREEBSD) || defined(OS_SOLARIS)
-extern const char kAlsaOutputDevice[];
-extern const char kAlsaInputDevice[];
-#endif
-
-MEDIA_EXPORT extern const char kAudioBufferSize[];
-
-#if defined(USE_CRAS)
-MEDIA_EXPORT extern const char kUseCras[];
-#endif
-
-#if defined(USE_PULSEAUDIO)
-MEDIA_EXPORT extern const char kUsePulseAudio[];
-#endif
-
-#if defined(OS_WIN)
-MEDIA_EXPORT extern const char kEnableExclusiveAudio[];
-#endif
-
-MEDIA_EXPORT extern const char kDisableAudioFallback[];
-
-MEDIA_EXPORT extern const char kDisableAudioOutputResampler[];
-
-#if defined(OS_WIN) || defined(OS_MACOSX)
-MEDIA_EXPORT extern const char kDisableRendererSideMixing[];
-#else
-MEDIA_EXPORT extern const char kEnableRendererSideMixing[];
-#endif
-
-MEDIA_EXPORT extern const char kEnableAudioMixer[];
-
-MEDIA_EXPORT extern const char kEnableWebAudioInput[];
-
-MEDIA_EXPORT extern const char kVideoThreads[];
-
-MEDIA_EXPORT extern const char kEnableEncryptedMedia[];
-
-MEDIA_EXPORT extern const char kEnableOpusPlayback[];
-
-} // namespace switches
-
-#endif // MEDIA_BASE_MEDIA_SWITCHES_H_
diff --git a/src/media/base/media_win.cc b/src/media/base/media_win.cc
deleted file mode 100644
index 616d370..0000000
--- a/src/media/base/media_win.cc
+++ /dev/null
@@ -1,67 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "media/base/media.h"
-
-#include <windows.h>
-#if defined(_WIN32_WINNT_WIN8)
-// The Windows 8 SDK defines FACILITY_VISUALCPP in winerror.h.
-#undef FACILITY_VISUALCPP
-#endif
-#include <delayimp.h>
-
-#include "base/file_path.h"
-#include "base/logging.h"
-#include "base/native_library.h"
-#include "base/path_service.h"
-
-#pragma comment(lib, "delayimp.lib")
-
-namespace media {
-
-// FFmpeg library name.
-static const char* kFFmpegDLL = "ffmpegsumo.dll";
-
-// Use a global to indicate whether the library has been initialized or not. We
-// rely on function level static initialization in InitializeMediaLibrary() to
-// guarantee this is only set once in a thread safe manner.
-static bool g_media_library_is_initialized = false;
-
-static bool InitializeMediaLibraryInternal(const FilePath& base_path) {
- DCHECK(!g_media_library_is_initialized);
-
- // LoadLibraryEx(..., LOAD_WITH_ALTERED_SEARCH_PATH) cannot handle
- // relative path.
- if (!base_path.IsAbsolute())
- return false;
-
- // Use alternate DLL search path so we don't load dependencies from the
- // system path. Refer to http://crbug.com/35857
- HMODULE lib = ::LoadLibraryEx(
- base_path.AppendASCII(kFFmpegDLL).value().c_str(), NULL,
- LOAD_WITH_ALTERED_SEARCH_PATH);
-
- // Check that we loaded the library successfully.
- g_media_library_is_initialized = (lib != NULL);
- return g_media_library_is_initialized;
-}
-
-bool InitializeMediaLibrary(const FilePath& base_path) {
- static const bool kMediaLibraryInitialized =
- InitializeMediaLibraryInternal(base_path);
- DCHECK_EQ(kMediaLibraryInitialized, g_media_library_is_initialized);
- return kMediaLibraryInitialized;
-}
-
-void InitializeMediaLibraryForTesting() {
- FilePath file_path;
- CHECK(PathService::Get(base::DIR_EXE, &file_path));
- CHECK(InitializeMediaLibrary(file_path));
-}
-
-bool IsMediaLibraryInitialized() {
- return g_media_library_is_initialized;
-}
-
-} // namespace media
diff --git a/src/media/base/message_loop_factory.cc b/src/media/base/message_loop_factory.cc
deleted file mode 100644
index 38f4473..0000000
--- a/src/media/base/message_loop_factory.cc
+++ /dev/null
@@ -1,61 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "media/base/message_loop_factory.h"
-
-#include "base/threading/thread.h"
-
-#if defined(OS_STARBOARD)
-#include "starboard/configuration.h"
-#endif // defined(OS_STARBOARD)
-
-namespace media {
-
-MessageLoopFactory::MessageLoopFactory() {}
-
-MessageLoopFactory::~MessageLoopFactory() {
- for (ThreadList::reverse_iterator it = threads_.rbegin();
- it != threads_.rend(); ++it) {
- base::Thread* thread = it->second;
- thread->Stop();
- delete thread;
- }
- threads_.clear();
-}
-
-scoped_refptr<base::MessageLoopProxy> MessageLoopFactory::GetMessageLoop(
- Type type) {
- return GetThread(type)->message_loop_proxy();
-}
-
-base::Thread* MessageLoopFactory::GetThread(Type type) {
- base::AutoLock auto_lock(lock_);
- for (ThreadList::iterator it = threads_.begin(); it != threads_.end(); ++it) {
- if (it->first == type)
- return it->second;
- }
-
- const char* name = NULL;
- switch (type) {
- case kPipeline:
- name = "MediaPipeline";
- break;
- }
-
- base::Thread* thread = new base::Thread(name);
- base::Thread::Options options;
-
- if (type == kPipeline) {
-#if defined(OS_STARBOARD) && defined(SB_MEDIA_THREAD_STACK_SIZE)
- options.stack_size = SB_MEDIA_THREAD_STACK_SIZE;
-#endif // defined(OS_STARBOARD) && defined(SB_MEDIA_THREAD_STACK_SIZE)
- }
-
- CHECK(thread->StartWithOptions(options))
- << "Failed to start thread: " << name;
- threads_.push_back(std::make_pair(type, thread));
- return thread;
-}
-
-} // namespace media
diff --git a/src/media/base/message_loop_factory.h b/src/media/base/message_loop_factory.h
deleted file mode 100644
index f399b48..0000000
--- a/src/media/base/message_loop_factory.h
+++ /dev/null
@@ -1,61 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef MEDIA_BASE_MESSAGE_LOOP_FACTORY_H_
-#define MEDIA_BASE_MESSAGE_LOOP_FACTORY_H_
-
-#include <list>
-#include <string>
-
-#include "base/memory/ref_counted.h"
-#include "base/memory/scoped_ptr.h"
-#include "base/message_loop_proxy.h"
-#include "base/synchronization/lock.h"
-#include "media/base/media_export.h"
-
-class MessageLoop;
-
-namespace base {
-class Thread;
-}
-
-namespace media {
-
-// Factory object that manages named MessageLoops.
-//
-// TODO(scherkus): replace this with something simpler http://crbug.com/116873
-class MEDIA_EXPORT MessageLoopFactory {
- public:
- enum Type {
- kPipeline
- };
-
- MessageLoopFactory();
-
- // Get the message loop proxy associated with |type|. A new MessageLoopProxy
- // is created if the factory doesn't have one associated with |type|.
- scoped_refptr<base::MessageLoopProxy> GetMessageLoop(Type type);
-
- private:
- // Only allow scoped_ptr<> to delete factory.
- friend class scoped_ptr<MessageLoopFactory>;
- ~MessageLoopFactory();
-
- // Returns the thread associated with |type| creating a new thread if needed.
- base::Thread* GetThread(Type type);
-
- // Lock used to serialize access for the following data members.
- base::Lock lock_;
-
- // List of pairs of created threads and their types. We use a list to ensure
- // threads are stopped & deleted in reverse order of creation.
- typedef std::list<std::pair<Type, base::Thread*> > ThreadList;
- ThreadList threads_;
-
- DISALLOW_COPY_AND_ASSIGN(MessageLoopFactory);
-};
-
-} // namespace media
-
-#endif // MEDIA_BASE_MESSAGE_LOOP_FACTORY_H_
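
The factory removed above lazily creates one named base::Thread per Type and stops the threads in reverse creation order when the factory is destroyed; its destructor is private, so ownership has to go through scoped_ptr. A hedged usage sketch follows; the posted task is a placeholder, not code from this tree.

    #include "base/bind.h"
    #include "base/location.h"
    #include "base/memory/scoped_ptr.h"
    #include "media/base/message_loop_factory.h"

    namespace {
    // Placeholder work item for illustration.
    void DoPipelineWork() {}
    }  // namespace

    void PostPipelineWork() {
      // scoped_ptr<> is the only permitted owner; the destructor is private.
      scoped_ptr<media::MessageLoopFactory> factory(
          new media::MessageLoopFactory());
      // The first request creates the "MediaPipeline" thread; later requests
      // for the same type return a proxy to the same thread.
      scoped_refptr<base::MessageLoopProxy> loop =
          factory->GetMessageLoop(media::MessageLoopFactory::kPipeline);
      loop->PostTask(FROM_HERE, base::Bind(&DoPipelineWork));
      // Destroying |factory| here stops and joins the thread.
    }
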
diff --git a/src/media/base/mock_audio_renderer_sink.cc b/src/media/base/mock_audio_renderer_sink.cc
deleted file mode 100644
index b21eb19..0000000
--- a/src/media/base/mock_audio_renderer_sink.cc
+++ /dev/null
@@ -1,17 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "media/base/mock_audio_renderer_sink.h"
-
-namespace media {
-
-MockAudioRendererSink::MockAudioRendererSink() {}
-MockAudioRendererSink::~MockAudioRendererSink() {}
-
-void MockAudioRendererSink::Initialize(const AudioParameters& params,
- RenderCallback* renderer) {
- callback_ = renderer;
-}
-
-} // namespace media
diff --git a/src/media/base/mock_audio_renderer_sink.h b/src/media/base/mock_audio_renderer_sink.h
deleted file mode 100644
index 7f59bf6..0000000
--- a/src/media/base/mock_audio_renderer_sink.h
+++ /dev/null
@@ -1,42 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef MEDIA_BASE_MOCK_AUDIO_RENDERER_SINK_H_
-#define MEDIA_BASE_MOCK_AUDIO_RENDERER_SINK_H_
-
-#include "media/audio/audio_parameters.h"
-#include "media/base/audio_renderer_sink.h"
-#include "testing/gmock/include/gmock/gmock.h"
-
-namespace media {
-
-class MockAudioRendererSink : public AudioRendererSink {
- public:
- MockAudioRendererSink();
-
- MOCK_METHOD0(Start, void());
- MOCK_METHOD0(Stop, void());
- MOCK_METHOD1(Pause, void(bool flush));
- MOCK_METHOD0(Play, void());
- MOCK_METHOD1(SetVolume, bool(double volume));
-#if defined(__LB_SHELL__) || defined(COBALT)
- MOCK_METHOD1(ResumeAfterUnderflow, void(bool));
-#endif
-
- virtual void Initialize(const AudioParameters& params,
- RenderCallback* renderer) OVERRIDE;
- AudioRendererSink::RenderCallback* callback() { return callback_; }
-
- protected:
- virtual ~MockAudioRendererSink();
-
- private:
- RenderCallback* callback_;
-
- DISALLOW_COPY_AND_ASSIGN(MockAudioRendererSink);
-};
-
-} // namespace media
-
-#endif // MEDIA_BASE_MOCK_AUDIO_RENDERER_SINK_H_
diff --git a/src/media/base/mock_data_source_host.cc b/src/media/base/mock_data_source_host.cc
deleted file mode 100644
index eff0b78..0000000
--- a/src/media/base/mock_data_source_host.cc
+++ /dev/null
@@ -1,13 +0,0 @@
-// Copyright (c) 2011 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "media/base/mock_data_source_host.h"
-
-namespace media {
-
-MockDataSourceHost::MockDataSourceHost() {}
-
-MockDataSourceHost::~MockDataSourceHost() {}
-
-} // namespace media
diff --git a/src/media/base/mock_data_source_host.h b/src/media/base/mock_data_source_host.h
deleted file mode 100644
index 914d055..0000000
--- a/src/media/base/mock_data_source_host.h
+++ /dev/null
@@ -1,32 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-//
-#ifndef MEDIA_BASE_MOCK_DATA_SOURCE_HOST_H_
-#define MEDIA_BASE_MOCK_DATA_SOURCE_HOST_H_
-
-#include <string>
-
-#include "media/base/data_source.h"
-#include "testing/gmock/include/gmock/gmock.h"
-
-namespace media {
-
-class MockDataSourceHost : public DataSourceHost {
- public:
- MockDataSourceHost();
- virtual ~MockDataSourceHost();
-
- // DataSourceHost implementation.
- MOCK_METHOD1(SetTotalBytes, void(int64 total_bytes));
- MOCK_METHOD2(AddBufferedByteRange, void(int64 start, int64 end));
- MOCK_METHOD2(AddBufferedTimeRange, void(base::TimeDelta start,
- base::TimeDelta end));
-
- private:
- DISALLOW_COPY_AND_ASSIGN(MockDataSourceHost);
-};
-
-} // namespace media
-
-#endif // MEDIA_BASE_MOCK_DATA_SOURCE_HOST_H_
diff --git a/src/media/base/mock_demuxer_host.cc b/src/media/base/mock_demuxer_host.cc
deleted file mode 100644
index 100787f..0000000
--- a/src/media/base/mock_demuxer_host.cc
+++ /dev/null
@@ -1,13 +0,0 @@
-// Copyright (c) 2011 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "media/base/mock_demuxer_host.h"
-
-namespace media {
-
-MockDemuxerHost::MockDemuxerHost() {}
-
-MockDemuxerHost::~MockDemuxerHost() {}
-
-} // namespace media
diff --git a/src/media/base/mock_demuxer_host.h b/src/media/base/mock_demuxer_host.h
deleted file mode 100644
index 597c132..0000000
--- a/src/media/base/mock_demuxer_host.h
+++ /dev/null
@@ -1,36 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-//
-#ifndef MEDIA_BASE_MOCK_DEMUXER_HOST_H_
-#define MEDIA_BASE_MOCK_DEMUXER_HOST_H_
-
-#include <string>
-
-#include "media/base/demuxer.h"
-#include "testing/gmock/include/gmock/gmock.h"
-
-namespace media {
-
-class MockDemuxerHost : public DemuxerHost {
- public:
- MockDemuxerHost();
- virtual ~MockDemuxerHost();
-
- // DataSourceHost implementation.
- MOCK_METHOD1(SetTotalBytes, void(int64 total_bytes));
- MOCK_METHOD2(AddBufferedByteRange, void(int64 start, int64 end));
- MOCK_METHOD2(AddBufferedTimeRange, void(base::TimeDelta start,
- base::TimeDelta end));
-
- // DemuxerHost implementation.
- MOCK_METHOD1(OnDemuxerError, void(PipelineStatus error));
- MOCK_METHOD1(SetDuration, void(base::TimeDelta duration));
-
- private:
- DISALLOW_COPY_AND_ASSIGN(MockDemuxerHost);
-};
-
-} // namespace media
-
-#endif // MEDIA_BASE_MOCK_DEMUXER_HOST_H_
diff --git a/src/media/base/mock_filters.cc b/src/media/base/mock_filters.cc
deleted file mode 100644
index c197ee2..0000000
--- a/src/media/base/mock_filters.cc
+++ /dev/null
@@ -1,95 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "media/base/mock_filters.h"
-
-#include "base/logging.h"
-#include "base/memory/scoped_ptr.h"
-
-using ::testing::_;
-using ::testing::Invoke;
-using ::testing::NotNull;
-using ::testing::Return;
-
-namespace media {
-
-MockDemuxer::MockDemuxer() {}
-
-MockDemuxer::~MockDemuxer() {}
-
-MockDemuxerStream::MockDemuxerStream() {}
-
-MockDemuxerStream::~MockDemuxerStream() {}
-
-MockVideoDecoder::MockVideoDecoder() {
- EXPECT_CALL(*this, HasAlpha()).WillRepeatedly(Return(false));
-}
-
-MockVideoDecoder::~MockVideoDecoder() {}
-
-MockAudioDecoder::MockAudioDecoder() {}
-
-MockAudioDecoder::~MockAudioDecoder() {}
-
-MockVideoRenderer::MockVideoRenderer() {}
-
-MockVideoRenderer::~MockVideoRenderer() {}
-
-MockAudioRenderer::MockAudioRenderer() {}
-
-MockAudioRenderer::~MockAudioRenderer() {}
-
-MockDecryptor::MockDecryptor() {}
-
-MockDecryptor::~MockDecryptor() {}
-
-void MockDecryptor::InitializeAudioDecoder(
- scoped_ptr<AudioDecoderConfig> config,
- const DecoderInitCB& init_cb) {
- InitializeAudioDecoderMock(*config, init_cb);
-}
-
-void MockDecryptor::InitializeVideoDecoder(
- scoped_ptr<VideoDecoderConfig> config,
- const DecoderInitCB& init_cb) {
- InitializeVideoDecoderMock(*config, init_cb);
-}
-
-MockDecryptorClient::MockDecryptorClient() {}
-
-MockDecryptorClient::~MockDecryptorClient() {}
-
-void MockDecryptorClient::NeedKey(const std::string& key_system,
- const std::string& session_id,
- const std::string& type,
- scoped_array<uint8> init_data,
- int init_data_length) {
- NeedKeyMock(key_system, session_id, type, init_data.get(), init_data_length);
-}
-
-MockFilterCollection::MockFilterCollection()
- : demuxer_(new MockDemuxer()),
- video_decoder_(new MockVideoDecoder()),
- audio_decoder_(new MockAudioDecoder()),
- video_renderer_(new MockVideoRenderer()),
- audio_renderer_(new MockAudioRenderer()) {
-}
-
-MockFilterCollection::~MockFilterCollection() {}
-
-scoped_ptr<FilterCollection> MockFilterCollection::Create() {
- scoped_ptr<FilterCollection> collection(new FilterCollection());
- collection->SetDemuxer(demuxer_);
- collection->GetVideoDecoders()->push_back(video_decoder_);
- collection->GetAudioDecoders()->push_back(audio_decoder_);
- collection->AddVideoRenderer(video_renderer_);
- collection->AddAudioRenderer(audio_renderer_);
- return collection.Pass();
-}
-
-MockStatisticsCB::MockStatisticsCB() {}
-
-MockStatisticsCB::~MockStatisticsCB() {}
-
-} // namespace media
diff --git a/src/media/base/mock_filters.h b/src/media/base/mock_filters.h
deleted file mode 100644
index 6faf508..0000000
--- a/src/media/base/mock_filters.h
+++ /dev/null
@@ -1,316 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-//
-// A new breed of mock media filters, this time using gmock! Feel free to add
-// actions if you need interesting side-effects.
-//
-// Don't forget you can use StrictMock<> and NiceMock<> if you want the mock
-// filters to fail the test or do nothing when an unexpected method is called.
-// http://code.google.com/p/googlemock/wiki/CookBook#Nice_Mocks_and_Strict_Mocks
-
-#ifndef MEDIA_BASE_MOCK_FILTERS_H_
-#define MEDIA_BASE_MOCK_FILTERS_H_
-
-#include <string>
-
-#include "base/callback.h"
-#include "media/base/audio_decoder.h"
-#include "media/base/audio_decoder_config.h"
-#include "media/base/audio_renderer.h"
-#include "media/base/decoder_buffer.h"
-#include "media/base/decryptor.h"
-#include "media/base/decryptor_client.h"
-#include "media/base/demuxer.h"
-#include "media/base/filter_collection.h"
-#include "media/base/pipeline_status.h"
-#include "media/base/video_decoder.h"
-#include "media/base/video_decoder_config.h"
-#include "media/base/video_frame.h"
-#include "media/base/video_renderer.h"
-#include "testing/gmock/include/gmock/gmock.h"
-
-namespace media {
-
-// Use this template to test for object destruction by setting expectations on
-// the method OnDestroy().
-//
-// TODO(scherkus): not sure about the naming... perhaps contribute this back
-// to gmock itself!
-template<class MockClass>
-class Destroyable : public MockClass {
- public:
- Destroyable() {}
-
- MOCK_METHOD0(OnDestroy, void());
-
- protected:
- virtual ~Destroyable() {
- OnDestroy();
- }
-
- private:
- DISALLOW_COPY_AND_ASSIGN(Destroyable);
-};
-
-class MockDemuxer : public Demuxer {
- public:
- MockDemuxer();
-
- // Demuxer implementation.
- MOCK_METHOD2(Initialize, void(DemuxerHost* host, const PipelineStatusCB& cb));
- MOCK_METHOD1(SetPlaybackRate, void(float playback_rate));
- MOCK_METHOD2(Seek, void(base::TimeDelta time, const PipelineStatusCB& cb));
- MOCK_METHOD1(Stop, void(const base::Closure& callback));
- MOCK_METHOD0(OnAudioRendererDisabled, void());
- MOCK_METHOD1(GetStream, scoped_refptr<DemuxerStream>(DemuxerStream::Type));
- MOCK_CONST_METHOD0(GetStartTime, base::TimeDelta());
-
- protected:
- virtual ~MockDemuxer();
-
- private:
- DISALLOW_COPY_AND_ASSIGN(MockDemuxer);
-};
-
-class MockDemuxerStream : public DemuxerStream {
- public:
- MockDemuxerStream();
-
- // DemuxerStream implementation.
- MOCK_METHOD0(type, Type());
- MOCK_METHOD1(Read, void(const ReadCB& read_cb));
- MOCK_METHOD0(audio_decoder_config, const AudioDecoderConfig&());
- MOCK_METHOD0(video_decoder_config, const VideoDecoderConfig&());
- MOCK_METHOD0(EnableBitstreamConverter, void());
-#if defined(__LB_SHELL__) || defined(COBALT)
- MOCK_CONST_METHOD0(StreamWasEncrypted, bool());
-#endif
-
- protected:
- virtual ~MockDemuxerStream();
-
- private:
- DISALLOW_COPY_AND_ASSIGN(MockDemuxerStream);
-};
-
-class MockVideoDecoder : public VideoDecoder {
- public:
- MockVideoDecoder();
-
- // VideoDecoder implementation.
- MOCK_METHOD3(Initialize, void(const scoped_refptr<DemuxerStream>&,
- const PipelineStatusCB&,
- const StatisticsCB&));
- MOCK_METHOD1(Read, void(const ReadCB&));
- MOCK_METHOD1(Reset, void(const base::Closure&));
- MOCK_METHOD1(Stop, void(const base::Closure&));
- MOCK_CONST_METHOD0(HasAlpha, bool());
-
- protected:
- virtual ~MockVideoDecoder();
-
- private:
- DISALLOW_COPY_AND_ASSIGN(MockVideoDecoder);
-};
-
-class MockAudioDecoder : public AudioDecoder {
- public:
- MockAudioDecoder();
-
- // AudioDecoder implementation.
- MOCK_METHOD3(Initialize, void(const scoped_refptr<DemuxerStream>&,
- const PipelineStatusCB&,
- const StatisticsCB&));
- MOCK_METHOD1(Read, void(const ReadCB&));
- MOCK_METHOD0(bits_per_channel, int(void));
- MOCK_METHOD0(channel_layout, ChannelLayout(void));
- MOCK_METHOD0(samples_per_second, int(void));
- MOCK_METHOD1(Reset, void(const base::Closure&));
-
- protected:
- virtual ~MockAudioDecoder();
-
- private:
- DISALLOW_COPY_AND_ASSIGN(MockAudioDecoder);
-};
-
-class MockVideoRenderer : public VideoRenderer {
- public:
- MockVideoRenderer();
-
- // VideoRenderer implementation.
- MOCK_METHOD10(Initialize, void(const scoped_refptr<DemuxerStream>& stream,
- const VideoDecoderList& decoders,
- const PipelineStatusCB& init_cb,
- const StatisticsCB& statistics_cb,
- const TimeCB& time_cb,
- const NaturalSizeChangedCB& size_changed_cb,
- const base::Closure& ended_cb,
- const PipelineStatusCB& error_cb,
- const TimeDeltaCB& get_time_cb,
- const TimeDeltaCB& get_duration_cb));
- MOCK_METHOD1(Play, void(const base::Closure& callback));
- MOCK_METHOD1(Pause, void(const base::Closure& callback));
- MOCK_METHOD1(Flush, void(const base::Closure& callback));
- MOCK_METHOD2(Preroll, void(base::TimeDelta time, const PipelineStatusCB& cb));
- MOCK_METHOD1(Stop, void(const base::Closure& callback));
- MOCK_METHOD1(SetPlaybackRate, void(float playback_rate));
-
- protected:
- virtual ~MockVideoRenderer();
-
- private:
- DISALLOW_COPY_AND_ASSIGN(MockVideoRenderer);
-};
-
-class MockAudioRenderer : public AudioRenderer {
- public:
- MockAudioRenderer();
-
- // AudioRenderer implementation.
- MOCK_METHOD9(Initialize, void(const scoped_refptr<DemuxerStream>& stream,
- const AudioDecoderList& decoders,
- const PipelineStatusCB& init_cb,
- const StatisticsCB& statistics_cb,
- const base::Closure& underflow_cb,
- const TimeCB& time_cb,
- const base::Closure& ended_cb,
- const base::Closure& disabled_cb,
- const PipelineStatusCB& error_cb));
- MOCK_METHOD1(Play, void(const base::Closure& callback));
- MOCK_METHOD1(Pause, void(const base::Closure& callback));
- MOCK_METHOD1(Flush, void(const base::Closure& callback));
- MOCK_METHOD1(Stop, void(const base::Closure& callback));
- MOCK_METHOD1(SetPlaybackRate, void(float playback_rate));
- MOCK_METHOD2(Preroll, void(base::TimeDelta time, const PipelineStatusCB& cb));
- MOCK_METHOD1(SetVolume, void(float volume));
- MOCK_METHOD1(ResumeAfterUnderflow, void(bool buffer_more_audio));
-
- protected:
- virtual ~MockAudioRenderer();
-
- private:
- DISALLOW_COPY_AND_ASSIGN(MockAudioRenderer);
-};
-
-class MockDecryptor : public Decryptor {
- public:
- MockDecryptor();
- virtual ~MockDecryptor();
-
- MOCK_METHOD4(GenerateKeyRequest, bool(const std::string& key_system,
- const std::string& type,
- const uint8* init_data,
- int init_data_length));
- MOCK_METHOD6(AddKey, void(const std::string& key_system,
- const uint8* key,
- int key_length,
- const uint8* init_data,
- int init_data_length,
- const std::string& session_id));
- MOCK_METHOD2(CancelKeyRequest, void(const std::string& key_system,
- const std::string& session_id));
- MOCK_METHOD2(RegisterKeyAddedCB, void(StreamType stream_type,
- const KeyAddedCB& key_added_cb));
- MOCK_METHOD3(Decrypt, void(StreamType stream_type,
- const scoped_refptr<DecoderBuffer>& encrypted,
- const DecryptCB& decrypt_cb));
- MOCK_METHOD1(CancelDecrypt, void(StreamType stream_type));
- // TODO(xhwang): The following two methods are workarounds of the issue that
- // move-only parameters are not supported in mocked methods. Remove when the
- // issue is fixed: http://code.google.com/p/googletest/issues/detail?id=395
- MOCK_METHOD2(InitializeAudioDecoderMock,
- void(const AudioDecoderConfig& config,
- const DecoderInitCB& init_cb));
- MOCK_METHOD2(InitializeVideoDecoderMock,
- void(const VideoDecoderConfig& config,
- const DecoderInitCB& init_cb));
- MOCK_METHOD2(DecryptAndDecodeAudio,
- void(const scoped_refptr<media::DecoderBuffer>& encrypted,
- const AudioDecodeCB& audio_decode_cb));
- MOCK_METHOD2(DecryptAndDecodeVideo,
- void(const scoped_refptr<media::DecoderBuffer>& encrypted,
- const VideoDecodeCB& video_decode_cb));
- MOCK_METHOD1(ResetDecoder, void(StreamType stream_type));
- MOCK_METHOD1(DeinitializeDecoder, void(StreamType stream_type));
-
- virtual void InitializeAudioDecoder(scoped_ptr<AudioDecoderConfig> config,
- const DecoderInitCB& init_cb) OVERRIDE;
- virtual void InitializeVideoDecoder(scoped_ptr<VideoDecoderConfig> config,
- const DecoderInitCB& init_cb) OVERRIDE;
-
- private:
- DISALLOW_COPY_AND_ASSIGN(MockDecryptor);
-};
-
-class MockDecryptorClient : public DecryptorClient {
- public:
- MockDecryptorClient();
- virtual ~MockDecryptorClient();
-
- MOCK_METHOD2(KeyAdded, void(const std::string&, const std::string&));
- MOCK_METHOD4(KeyError, void(const std::string&, const std::string&,
- Decryptor::KeyError, int));
- MOCK_METHOD4(KeyMessage, void(const std::string& key_system,
- const std::string& session_id,
- const std::string& message,
- const std::string& default_url));
- // TODO(xhwang): This is a workaround of the issue that move-only parameters
- // are not supported in mocked methods. Remove this when the issue is fixed
- // (http://code.google.com/p/googletest/issues/detail?id=395) or when we use
- // std::string instead of scoped_array<uint8> (http://crbug.com/130689).
- MOCK_METHOD5(NeedKeyMock, void(const std::string& key_system,
- const std::string& session_id,
- const std::string& type,
- const uint8* init_data,
- int init_data_length));
- virtual void NeedKey(const std::string& key_system,
- const std::string& session_id,
- const std::string& type,
- scoped_array<uint8> init_data,
- int init_data_length) OVERRIDE;
-
- private:
- DISALLOW_COPY_AND_ASSIGN(MockDecryptorClient);
-};
-
-// FilterFactory that returns canned instances of mock filters. You can set
-// expectations on the filters and then pass the collection into a pipeline.
-class MockFilterCollection {
- public:
- MockFilterCollection();
- virtual ~MockFilterCollection();
-
- // Mock accessors.
- MockDemuxer* demuxer() const { return demuxer_; }
- MockVideoDecoder* video_decoder() const { return video_decoder_; }
- MockAudioDecoder* audio_decoder() const { return audio_decoder_; }
- MockVideoRenderer* video_renderer() const { return video_renderer_; }
- MockAudioRenderer* audio_renderer() const { return audio_renderer_; }
-
- // Creates the FilterCollection containing the mocks.
- scoped_ptr<FilterCollection> Create();
-
- private:
- scoped_refptr<MockDemuxer> demuxer_;
- scoped_refptr<MockVideoDecoder> video_decoder_;
- scoped_refptr<MockAudioDecoder> audio_decoder_;
- scoped_refptr<MockVideoRenderer> video_renderer_;
- scoped_refptr<MockAudioRenderer> audio_renderer_;
-
- DISALLOW_COPY_AND_ASSIGN(MockFilterCollection);
-};
-
-// Helper mock statistics callback.
-class MockStatisticsCB {
- public:
- MockStatisticsCB();
- ~MockStatisticsCB();
-
- MOCK_METHOD1(OnStatistics, void(const media::PipelineStatistics& statistics));
-};
-
-} // namespace media
-
-#endif // MEDIA_BASE_MOCK_FILTERS_H_
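
A hedged sketch of how the mock collection above is typically consumed in a gmock-based pipeline test; the specific expectation is illustrative rather than taken from this tree.

    #include "media/base/mock_filters.h"
    #include "testing/gmock/include/gmock/gmock.h"

    using ::testing::_;
    using ::testing::AnyNumber;

    void ExampleMockSetup() {
      media::MockFilterCollection mocks;
      // Expectations are set on the individual mocks before the assembled
      // collection is handed to the code under test.
      EXPECT_CALL(*mocks.demuxer(), Stop(_)).Times(AnyNumber());
      scoped_ptr<media::FilterCollection> collection(mocks.Create());
      // ... pass |collection| to the pipeline under test ...
    }
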
diff --git a/src/media/base/mock_shell_data_source_reader.h b/src/media/base/mock_shell_data_source_reader.h
deleted file mode 100644
index 49ae468..0000000
--- a/src/media/base/mock_shell_data_source_reader.h
+++ /dev/null
@@ -1,39 +0,0 @@
-/*
- * Copyright 2012 Google Inc. All Rights Reserved.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef MEDIA_BASE_MOCK_SHELL_DATA_SOURCE_READER_H_
-#define MEDIA_BASE_MOCK_SHELL_DATA_SOURCE_READER_H_
-
-#include "media/base/shell_data_source_reader.h"
-#include "testing/gmock/include/gmock/gmock.h"
-
-namespace media {
-
-class MockShellDataSourceReader : public ShellDataSourceReader {
- public:
- MockShellDataSourceReader() {}
-
- // ShellDataSourceReader implementation
- MOCK_METHOD1(SetDataSource, void(scoped_refptr<DataSource>));
- MOCK_METHOD1(SetErrorCallback, void(base::Closure));
- MOCK_METHOD3(BlockingRead, int(int64, int, uint8*));
- MOCK_METHOD0(FileSize, int64());
- MOCK_METHOD0(AbortPendingReadIfAny, void());
-};
-
-} // namespace media
-
-#endif // MEDIA_BASE_MOCK_SHELL_DATA_SOURCE_READER_H_
diff --git a/src/media/base/multi_channel_resampler.cc b/src/media/base/multi_channel_resampler.cc
deleted file mode 100644
index a5cbf3e..0000000
--- a/src/media/base/multi_channel_resampler.cc
+++ /dev/null
@@ -1,104 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "media/base/multi_channel_resampler.h"
-
-#include "base/bind.h"
-#include "base/bind_helpers.h"
-#include "base/logging.h"
-#include "media/base/audio_bus.h"
-
-namespace media {
-
-MultiChannelResampler::MultiChannelResampler(int channels,
- double io_sample_rate_ratio,
- const ReadCB& read_cb)
- : last_frame_count_(0),
- read_cb_(read_cb),
- output_frames_ready_(0) {
- // Allocate each channel's resampler.
- resamplers_.reserve(channels);
- for (int i = 0; i < channels; ++i) {
- resamplers_.push_back(new SincResampler(io_sample_rate_ratio, base::Bind(
- &MultiChannelResampler::ProvideInput, base::Unretained(this), i)));
- }
-}
-
-MultiChannelResampler::~MultiChannelResampler() {}
-
-void MultiChannelResampler::Resample(AudioBus* audio_bus, int frames) {
- DCHECK_EQ(static_cast<size_t>(audio_bus->channels()), resamplers_.size());
-
- // We need to ensure that SincResampler only calls ProvideInput once for each
- // channel. To ensure this, we chunk the number of requested frames into
- // SincResampler::ChunkSize() sized chunks. SincResampler guarantees it will
- // only call ProvideInput() once when we resample this way.
- output_frames_ready_ = 0;
- int chunk_size = resamplers_[0]->ChunkSize();
- while (output_frames_ready_ < frames) {
- int frames_this_time = std::min(frames - output_frames_ready_, chunk_size);
-
- // Resample each channel.
- for (size_t i = 0; i < resamplers_.size(); ++i) {
- DCHECK_EQ(chunk_size, resamplers_[i]->ChunkSize());
-
- // Depending on the sample-rate scale factor, and the internal buffering
- // used in a SincResampler kernel, this call to Resample() will only
- // sometimes call ProvideInput(). However, if it calls ProvideInput() for
- // the first channel, then it will call it for the remaining channels,
- // since they all buffer in the same way and are processing the same
- // number of frames.
- resamplers_[i]->Resample(
- audio_bus->channel(i) + output_frames_ready_, frames_this_time);
- }
-
- output_frames_ready_ += frames_this_time;
- }
-}
-
-void MultiChannelResampler::ProvideInput(int channel, float* destination,
- int frames) {
- // Get the data from the multi-channel provider when the first channel asks
- // for it. For subsequent channels, we can just dish out the channel data
- // from that (stored in |resampler_audio_bus_|).
- if (channel == 0) {
- // Allocate staging arrays on the first request and if the frame size or
- // |destination| changes (should only happen once).
- if (!resampler_audio_bus_.get() ||
- resampler_audio_bus_->frames() != frames ||
- wrapped_resampler_audio_bus_->channel(0) != destination) {
- resampler_audio_bus_ = AudioBus::Create(resamplers_.size(), frames);
-
- // Create a channel vector based on |resampler_audio_bus_| but using
- // |destination| directly for the first channel and then wrap it in a new
- // AudioBus so we can avoid an extra memcpy later.
- resampler_audio_data_.clear();
- resampler_audio_data_.reserve(resampler_audio_bus_->channels());
- resampler_audio_data_.push_back(destination);
- for (int i = 1; i < resampler_audio_bus_->channels(); ++i)
- resampler_audio_data_.push_back(resampler_audio_bus_->channel(i));
- wrapped_resampler_audio_bus_ = AudioBus::WrapVector(
- frames, resampler_audio_data_);
- }
-
- last_frame_count_ = frames;
- read_cb_.Run(output_frames_ready_, wrapped_resampler_audio_bus_.get());
- } else {
- // All channels must ask for the same amount. This should always be the
- // case, but let's just make sure.
- DCHECK_EQ(frames, last_frame_count_);
-
- // Copy the channel data from what we received from |read_cb_|.
- memcpy(destination, resampler_audio_bus_->channel(channel),
- sizeof(*resampler_audio_bus_->channel(channel)) * frames);
- }
-}
-
-void MultiChannelResampler::Flush() {
- last_frame_count_ = 0;
- for (size_t i = 0; i < resamplers_.size(); ++i)
- resamplers_[i]->Flush();
-}
-
-} // namespace media
diff --git a/src/media/base/multi_channel_resampler.h b/src/media/base/multi_channel_resampler.h
deleted file mode 100644
index 6dd565b..0000000
--- a/src/media/base/multi_channel_resampler.h
+++ /dev/null
@@ -1,70 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef MEDIA_BASE_MULTI_CHANNEL_RESAMPLER_H_
-#define MEDIA_BASE_MULTI_CHANNEL_RESAMPLER_H_
-
-#include <vector>
-
-#include "base/callback.h"
-#include "base/memory/scoped_ptr.h"
-#include "base/memory/scoped_vector.h"
-#include "media/base/sinc_resampler.h"
-
-namespace media {
-class AudioBus;
-
-// MultiChannelResampler is a multi channel wrapper for SincResampler; allowing
-// high quality sample rate conversion of multiple channels at once.
-class MEDIA_EXPORT MultiChannelResampler {
- public:
- // Callback type for providing more data into the resampler. Expects AudioBus
- // to be completely filled with data upon return; zero padded if not enough
- // frames are available to satisfy the request. |frame_delay| is the number
- // of output frames already processed and can be used to estimate delay.
- typedef base::Callback<void(int frame_delay, AudioBus* audio_bus)> ReadCB;
-
- // Constructs a MultiChannelResampler with the specified |read_cb|, which is
- // used to acquire audio data for resampling. |io_sample_rate_ratio| is the
- // ratio of input / output sample rates.
- MultiChannelResampler(int channels, double io_sample_rate_ratio,
- const ReadCB& read_cb);
- virtual ~MultiChannelResampler();
-
- // Resamples |frames| of data from |read_cb_| into AudioBus.
- void Resample(AudioBus* audio_bus, int frames);
-
- // Flush all buffered data and reset internal indices.
- void Flush();
-
- private:
- // SincResampler::ReadCB implementation. ProvideInput() will be called for
- // each channel (in channel order) as SincResampler needs more data.
- void ProvideInput(int channel, float* destination, int frames);
-
- // Sanity check to ensure that ProvideInput() retrieves the same number of
- // frames for every channel.
- int last_frame_count_;
-
- // Source of data for resampling.
- ReadCB read_cb_;
-
- // Each channel has its own high quality resampler.
- ScopedVector<SincResampler> resamplers_;
-
- // Buffers for audio data going into SincResampler from ReadCB.
- scoped_ptr<AudioBus> resampler_audio_bus_;
- scoped_ptr<AudioBus> wrapped_resampler_audio_bus_;
- std::vector<float*> resampler_audio_data_;
-
- // The number of output frames that have successfully been processed during
- // the current Resample() call.
- int output_frames_ready_;
-
- DISALLOW_COPY_AND_ASSIGN(MultiChannelResampler);
-};
-
-} // namespace media
-
-#endif // MEDIA_BASE_MULTI_CHANNEL_RESAMPLER_H_
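
A short usage sketch of the interface above, patterned on the unit test that follows; the 48000/44100 ratio and the silence-producing callback are illustrative placeholders.

    #include "base/bind.h"
    #include "media/base/audio_bus.h"
    #include "media/base/multi_channel_resampler.h"

    namespace {
    // Placeholder source: zero-fills every requested frame.
    void ProvideSilence(int frame_delay, media::AudioBus* audio_bus) {
      audio_bus->Zero();
    }
    }  // namespace

    void ResampleExample() {
      const int kChannels = 2;
      const double kIoRatio = 48000.0 / 44100.0;  // input rate / output rate
      media::MultiChannelResampler resampler(
          kChannels, kIoRatio, base::Bind(&ProvideSilence));
      scoped_ptr<media::AudioBus> output(
          media::AudioBus::Create(kChannels, 512));
      // Pulls ProvideSilence() in ChunkSize() batches, once per channel.
      resampler.Resample(output.get(), output->frames());
    }
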
diff --git a/src/media/base/multi_channel_resampler_unittest.cc b/src/media/base/multi_channel_resampler_unittest.cc
deleted file mode 100644
index ad67550..0000000
--- a/src/media/base/multi_channel_resampler_unittest.cc
+++ /dev/null
@@ -1,138 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include <cmath>
-
-#include "base/bind.h"
-#include "base/bind_helpers.h"
-#include "base/logging.h"
-#include "base/memory/scoped_ptr.h"
-#include "media/base/audio_bus.h"
-#include "media/base/multi_channel_resampler.h"
-#include "testing/gtest/include/gtest/gtest.h"
-
-namespace media {
-
-// Just test a basic resampling case. The SincResampler unit test will take
-// care of accuracy testing; we just need to check that multichannel works as
-// expected within some tolerance.
-static const float kScaleFactor = 192000.0f / 44100.0f;
-
-// Simulate large and small sample requests used by the different audio paths.
-static const int kHighLatencySize = 8192;
-// Low latency buffers show a larger error than high latency ones, which makes
-// sense since each error represents a larger portion of the total request.
-static const int kLowLatencySize = 128;
-
-// Test fill value.
-static const float kFillValue = 0.1f;
-
-// Chosen arbitrarily based on what each resampler reported during testing.
-static const double kLowLatencyMaxRMSError = 0.0036;
-static const double kLowLatencyMaxError = 0.04;
-static const double kHighLatencyMaxRMSError = 0.0036;
-static const double kHighLatencyMaxError = 0.04;
-
-class MultiChannelResamplerTest
- : public testing::TestWithParam<int> {
- public:
- MultiChannelResamplerTest()
- : last_frame_delay_(-1) {
- }
- virtual ~MultiChannelResamplerTest() {}
-
- void InitializeAudioData(int channels, int frames) {
- frames_ = frames;
- audio_bus_ = AudioBus::Create(channels, frames);
- }
-
-  // MultiChannelResampler::ReadCB implementation, just fills the provided
-  // |audio_bus| with |kFillValue|.
- virtual void ProvideInput(int frame_delay, AudioBus* audio_bus) {
- EXPECT_GT(frame_delay, last_frame_delay_);
- last_frame_delay_ = frame_delay;
-
- float fill_value = fill_junk_values_ ? (1 / kFillValue) : kFillValue;
- EXPECT_EQ(audio_bus->channels(), audio_bus_->channels());
- for (int i = 0; i < audio_bus->channels(); ++i)
- for (int j = 0; j < audio_bus->frames(); ++j)
- audio_bus->channel(i)[j] = fill_value;
- }
-
- void MultiChannelTest(int channels, int frames, double expected_max_rms_error,
- double expected_max_error) {
- InitializeAudioData(channels, frames);
- MultiChannelResampler resampler(channels, kScaleFactor, base::Bind(
- &MultiChannelResamplerTest::ProvideInput, base::Unretained(this)));
-
- // First prime the resampler with some junk data, so we can verify Flush().
- fill_junk_values_ = true;
- resampler.Resample(audio_bus_.get(), 1);
- resampler.Flush();
- fill_junk_values_ = false;
-
- // The last frame delay should be strictly less than the total frame count.
- EXPECT_LT(last_frame_delay_, audio_bus_->frames());
- last_frame_delay_ = -1;
-
- // If Flush() didn't work, the rest of the tests will fail.
- resampler.Resample(audio_bus_.get(), frames);
- TestValues(expected_max_rms_error, expected_max_error);
- }
-
- void HighLatencyTest(int channels) {
- MultiChannelTest(channels, kHighLatencySize, kHighLatencyMaxRMSError,
- kHighLatencyMaxError);
- }
-
- void LowLatencyTest(int channels) {
- MultiChannelTest(channels, kLowLatencySize, kLowLatencyMaxRMSError,
- kLowLatencyMaxError);
- }
-
-  void TestValues(double expected_max_rms_error, double expected_max_error) {
- // Calculate Root-Mean-Square-Error for the resampling.
- double max_error = 0.0;
- double sum_of_squares = 0.0;
- for (int i = 0; i < audio_bus_->channels(); ++i) {
- for (int j = 0; j < frames_; ++j) {
- // Ensure all values are accounted for.
- ASSERT_NE(audio_bus_->channel(i)[j], 0);
-
- double error = fabs(audio_bus_->channel(i)[j] - kFillValue);
- max_error = std::max(max_error, error);
- sum_of_squares += error * error;
- }
- }
-
- double rms_error = sqrt(
- sum_of_squares / (frames_ * audio_bus_->channels()));
-
- EXPECT_LE(rms_error, expected_max_rms_error);
- EXPECT_LE(max_error, expected_max_error);
- }
-
- protected:
- int frames_;
- bool fill_junk_values_;
- scoped_ptr<AudioBus> audio_bus_;
- int last_frame_delay_;
-
- DISALLOW_COPY_AND_ASSIGN(MultiChannelResamplerTest);
-};
-
-TEST_P(MultiChannelResamplerTest, HighLatency) {
- HighLatencyTest(GetParam());
-}
-
-TEST_P(MultiChannelResamplerTest, LowLatency) {
- LowLatencyTest(GetParam());
-}
-
-// Test common channel layouts: mono, stereo, 5.1, 7.1.
-INSTANTIATE_TEST_CASE_P(
- MultiChannelResamplerTest, MultiChannelResamplerTest,
- testing::Values(1, 2, 6, 8));
-
-} // namespace media
diff --git a/src/media/base/pipeline.h b/src/media/base/pipeline.h
deleted file mode 100644
index d3d752a..0000000
--- a/src/media/base/pipeline.h
+++ /dev/null
@@ -1,189 +0,0 @@
-// Copyright 2016 Google Inc. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef MEDIA_BASE_PIPELINE_H_
-#define MEDIA_BASE_PIPELINE_H_
-
-#include "base/callback.h"
-#include "base/memory/ref_counted.h"
-#include "base/message_loop_proxy.h"
-#include "base/time.h"
-#include "media/base/decryptor.h"
-#include "media/base/filter_collection.h"
-#include "media/base/media_export.h"
-#include "media/base/pipeline_status.h"
-#include "media/base/ranges.h"
-#include "ui/gfx/rect.h"
-#include "ui/gfx/size.h"
-
-#if defined(OS_STARBOARD)
-#if SB_HAS(PLAYER)
-
-#define COBALT_USE_SBPLAYER_PIPELINE
-
-#endif // SB_HAS(PLAYER)
-#endif // defined(OS_STARBOARD)
-
-#if defined(COBALT_USE_SBPLAYER_PIPELINE)
-#include "starboard/window.h"
-typedef SbWindow PipelineWindow;
-#else // defined(COBALT_USE_SBPLAYER_PIPELINE)
-typedef void* PipelineWindow;
-#endif // defined(COBALT_USE_SBPLAYER_PIPELINE)
-
-namespace media {
-
-class MediaLog;
-
-// Pipeline contains the common interface for media pipelines. It provides
-// functions to perform asynchronous initialization, pausing, seeking and
-// playing.
-class MEDIA_EXPORT Pipeline : public base::RefCountedThreadSafe<Pipeline> {
- public:
- // Return true if the punch through box should be rendered. Return false if
- // no punch through box should be rendered.
- typedef base::Callback<bool(const gfx::Rect&)> SetBoundsCB;
-
- // Buffering states the pipeline transitions between during playback.
- // kHaveMetadata:
- // Indicates that the following things are known:
- // content duration, natural size, start time, and whether the content has
- // audio and/or video in supported formats.
- // kPrerollCompleted:
- // All renderers have buffered enough data to satisfy preroll and are ready
- // to start playback.
- enum BufferingState {
- kHaveMetadata,
- kPrerollCompleted,
- };
-
- typedef base::Callback<void(BufferingState)> BufferingStateCB;
-
- static scoped_refptr<Pipeline> Create(
- PipelineWindow window,
- const scoped_refptr<base::MessageLoopProxy>& message_loop,
- MediaLog* media_log);
-
- virtual ~Pipeline() {}
-
- virtual void Suspend() {}
- virtual void Resume() {}
-
-  // Build a pipeline using the given filter collection to construct a filter
-  // chain, executing |seek_cb| when the initial seek/preroll has completed.
- //
- // |filter_collection| must be a complete collection containing a demuxer,
- // audio/video decoders, and audio/video renderers. Failing to do so will
- // result in a crash.
- //
- // The following permanent callbacks will be executed as follows up until
- // Stop() has completed:
- // |decryptor_ready_cb| can be used if Pipeline needs to be notified when
- // the Decryptor is ready.
- // |ended_cb| will be executed whenever the media reaches the end.
- // |error_cb| will be executed whenever an error occurs but hasn't
- // been reported already through another callback.
- // |buffering_state_cb| Optional callback that will be executed whenever the
- // pipeline's buffering state changes.
- // |duration_change_cb| optional callback that will be executed whenever the
- // presentation duration changes.
- // It is an error to call this method after the pipeline has already started.
- virtual void Start(scoped_ptr<FilterCollection> filter_collection,
- const SetDecryptorReadyCB& decryptor_ready_cb,
- const PipelineStatusCB& ended_cb,
- const PipelineStatusCB& error_cb,
- const PipelineStatusCB& seek_cb,
- const BufferingStateCB& buffering_state_cb,
- const base::Closure& duration_change_cb) = 0;
-
- // Asynchronously stops the pipeline, executing |stop_cb| when the pipeline
- // teardown has completed.
- //
-  // Stop() must complete before destroying the pipeline. It is permissible to
- // call Stop() at any point during the lifetime of the pipeline.
- virtual void Stop(const base::Closure& stop_cb) = 0;
-
- // Attempt to seek to the position specified by time. |seek_cb| will be
- // executed when the all filters in the pipeline have processed the seek.
- //
- // Clients are expected to call GetMediaTime() to check whether the seek
- // succeeded.
- //
- // It is an error to call this method if the pipeline has not started.
- virtual void Seek(base::TimeDelta time, const PipelineStatusCB& seek_cb) = 0;
-
- // Returns true if the media has audio.
- virtual bool HasAudio() const = 0;
-
- // Returns true if the media has video.
- virtual bool HasVideo() const = 0;
-
- // Gets the current playback rate of the pipeline. When the pipeline is
- // started, the playback rate will be 0.0f. A rate of 1.0f indicates
- // that the pipeline is rendering the media at the standard rate. Valid
- // values for playback rate are >= 0.0f.
- virtual float GetPlaybackRate() const = 0;
-
- // Attempt to adjust the playback rate. Setting a playback rate of 0.0f pauses
- // all rendering of the media. A rate of 1.0f indicates a normal playback
- // rate. Values for the playback rate must be greater than or equal to 0.0f.
- virtual void SetPlaybackRate(float playback_rate) = 0;
-
- // Gets the current volume setting being used by the audio renderer. When
- // the pipeline is started, this value will be 1.0f. Valid values range
- // from 0.0f to 1.0f.
- virtual float GetVolume() const = 0;
-
- // Attempt to set the volume of the audio renderer. Valid values for volume
- // range from 0.0f (muted) to 1.0f (full volume). This value affects all
- // channels proportionately for multi-channel audio streams.
- virtual void SetVolume(float volume) = 0;
-
- // Returns the current media playback time, which progresses from 0 until
- // GetMediaDuration().
- virtual base::TimeDelta GetMediaTime() const = 0;
-
- // Get approximate time ranges of buffered media.
- virtual Ranges<base::TimeDelta> GetBufferedTimeRanges() = 0;
-
- // Get the duration of the media in microseconds. If the duration has not
- // been determined yet, then returns 0.
- virtual base::TimeDelta GetMediaDuration() const = 0;
-
- // Get the total size of the media file. If the size has not yet been
- // determined or can not be determined, this value is 0.
- virtual int64 GetTotalBytes() const = 0;
-
- // Gets the natural size of the video output in pixel units. If there is no
- // video or the video has not been rendered yet, the width and height will
- // be 0.
- virtual void GetNaturalVideoSize(gfx::Size* out_size) const = 0;
-
- // Return true if loading progress has been made since the last time this
- // method was called.
- virtual bool DidLoadingProgress() const = 0;
-
- // Gets the current pipeline statistics.
- virtual PipelineStatistics GetStatistics() const = 0;
-
- // Get the SetBoundsCB used to set the bounds of the video frame.
- virtual SetBoundsCB GetSetBoundsCB() { return SetBoundsCB(); }
-
- // Updates the player's preference for decode-to-texture versus punch through.
- virtual void SetDecodeToTextureOutputMode(bool /*enabled*/) {}
-};
-
-} // namespace media
-
-#endif // MEDIA_BASE_PIPELINE_H_
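
One contract in the interface above is easy to miss: Stop() is asynchronous and must complete before the pipeline is destroyed, which is what PipelineImpl::Suspend() below enforces by blocking on a WaitableEvent. A hedged sketch of the same shutdown pattern from the caller's side:

    #include "base/bind.h"
    #include "base/synchronization/waitable_event.h"
    #include "media/base/pipeline.h"

    // Blocks until the pipeline has fully torn itself down.
    void StopPipelineAndWait(const scoped_refptr<media::Pipeline>& pipeline) {
      base::WaitableEvent done(false /* manual_reset */,
                               false /* initially_signaled */);
      pipeline->Stop(
          base::Bind(&base::WaitableEvent::Signal, base::Unretained(&done)));
      done.Wait();
      // Only now is it safe to drop the last reference to |pipeline|.
    }
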
diff --git a/src/media/base/pipeline_impl.cc b/src/media/base/pipeline_impl.cc
deleted file mode 100644
index aff759d..0000000
--- a/src/media/base/pipeline_impl.cc
+++ /dev/null
@@ -1,1005 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "media/base/pipeline_impl.h"
-
-#include <algorithm>
-
-#include "base/bind.h"
-#include "base/callback.h"
-#include "base/callback_helpers.h"
-#include "base/compiler_specific.h"
-#include "base/message_loop.h"
-#include "base/metrics/histogram.h"
-#include "base/stl_util.h"
-#include "base/string_number_conversions.h"
-#include "base/string_util.h"
-#include "base/synchronization/condition_variable.h"
-#include "media/base/audio_decoder.h"
-#include "media/base/audio_renderer.h"
-#include "media/base/clock.h"
-#include "media/base/filter_collection.h"
-#include "media/base/media_log.h"
-#if defined(__LB_SHELL__) || defined(COBALT)
-#include "media/base/shell_media_platform.h"
-#include "media/base/shell_media_statistics.h"
-#include "media/base/shell_video_frame_provider.h"
-#endif // defined(__LB_SHELL__) || defined(COBALT)
-#include "media/base/video_decoder.h"
-#include "media/base/video_decoder_config.h"
-#include "media/base/video_renderer.h"
-
-using base::TimeDelta;
-
-namespace media {
-
-PipelineStatusNotification::PipelineStatusNotification()
- : cv_(&lock_), status_(PIPELINE_OK), notified_(false) {
-}
-
-PipelineStatusNotification::~PipelineStatusNotification() {
- DCHECK(notified_);
-}
-
-PipelineStatusCB PipelineStatusNotification::Callback() {
- return base::Bind(&PipelineStatusNotification::Notify,
- base::Unretained(this));
-}
-
-void PipelineStatusNotification::Notify(media::PipelineStatus status) {
- base::AutoLock auto_lock(lock_);
- DCHECK(!notified_);
- notified_ = true;
- status_ = status;
- cv_.Signal();
-}
-
-void PipelineStatusNotification::Wait() {
- base::AutoLock auto_lock(lock_);
- while (!notified_)
- cv_.Wait();
-}
-
-media::PipelineStatus PipelineStatusNotification::status() {
- base::AutoLock auto_lock(lock_);
- DCHECK(notified_);
- return status_;
-}
-
-scoped_refptr<Pipeline> Pipeline::Create(
- PipelineWindow window,
- const scoped_refptr<base::MessageLoopProxy>& message_loop,
- MediaLog* media_log) {
- UNREFERENCED_PARAMETER(window);
- return new PipelineImpl(message_loop, media_log);
-}
-
-PipelineImpl::PipelineImpl(
- const scoped_refptr<base::MessageLoopProxy>& message_loop,
- MediaLog* media_log)
- : message_loop_(message_loop),
- media_log_(media_log),
- running_(false),
- did_loading_progress_(false),
- total_bytes_(0),
- natural_size_(0, 0),
- volume_(1.0f),
- playback_rate_(0.0f),
- clock_(new Clock(&base::Time::Now)),
- waiting_for_clock_update_(false),
- status_(PIPELINE_OK),
- has_audio_(false),
- has_video_(false),
- state_(kCreated),
- audio_ended_(false),
- video_ended_(false),
- audio_disabled_(false),
- creation_time_(base::Time::Now()) {
- media_log_->AddEvent(
- media_log_->CreatePipelineStateChangedEvent(GetStateString(kCreated)));
- media_log_->AddEvent(
- media_log_->CreateEvent(MediaLogEvent::PIPELINE_CREATED));
-}
-
-PipelineImpl::~PipelineImpl() {
- // TODO(scherkus): Reenable after figuring out why this is firing, see
- // http://crbug.com/148405
-#if 0
- DCHECK(thread_checker_.CalledOnValidThread())
- << "PipelineImpl must be destroyed on same thread that created it";
-#endif
- DCHECK(!running_) << "Stop() must complete before destroying object";
- DCHECK(stop_cb_.is_null());
- DCHECK(seek_cb_.is_null());
-
- media_log_->AddEvent(
- media_log_->CreateEvent(MediaLogEvent::PIPELINE_DESTROYED));
-}
-
-void PipelineImpl::Suspend() {
- // PipelineImpl::Suspend() is only called during quitting. It is blocking
- // and may take a long time.
- base::WaitableEvent waiter(false, false);
- DLOG(INFO) << "Trying to stop media pipeline.";
- Stop(base::Bind(&base::WaitableEvent::Signal, base::Unretained(&waiter)));
- waiter.Wait();
- DLOG(INFO) << "Media pipeline suspended.";
-}
-
-void PipelineImpl::Resume() {
- // PipelineImpl doesn't support Resume().
- NOTREACHED();
-}
-
-void PipelineImpl::Start(scoped_ptr<FilterCollection> collection,
- const SetDecryptorReadyCB& decryptor_ready_cb,
- const PipelineStatusCB& ended_cb,
- const PipelineStatusCB& error_cb,
- const PipelineStatusCB& seek_cb,
- const BufferingStateCB& buffering_state_cb,
- const base::Closure& duration_change_cb) {
- DCHECK_EQ(collection->GetAudioDecoders()->size(), 1);
- DCHECK_EQ(collection->GetVideoDecoders()->size(), 1);
-
- base::AutoLock auto_lock(lock_);
- CHECK(!running_) << "Media pipeline is already running";
- DCHECK(!buffering_state_cb.is_null());
-
- running_ = true;
- message_loop_->PostTask(
- FROM_HERE, base::Bind(&PipelineImpl::StartTask, this,
- base::Passed(&collection), ended_cb, error_cb,
- seek_cb, buffering_state_cb, duration_change_cb));
-}
-
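For orientation, a rough sketch of how a client wires up Start(); Client, its callback methods, and BuildFilterCollection() are hypothetical stand-ins rather than code from this patch:

  scoped_ptr<media::FilterCollection> collection = BuildFilterCollection();
  pipeline->Start(
      collection.Pass(),
      media::SetDecryptorReadyCB(),  // no decryptor wiring in this sketch
      base::Bind(&Client::OnEnded, base::Unretained(client)),        // ended_cb
      base::Bind(&Client::OnError, base::Unretained(client)),        // error_cb
      base::Bind(&Client::OnInitialSeek, base::Unretained(client)),  // seek_cb
      base::Bind(&Client::OnBufferingState, base::Unretained(client)),
      base::Bind(&Client::OnDurationChange, base::Unretained(client)));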
-void PipelineImpl::Stop(const base::Closure& stop_cb) {
- base::AutoLock auto_lock(lock_);
- message_loop_->PostTask(FROM_HERE,
- base::Bind(&PipelineImpl::StopTask, this, stop_cb));
-}
-
-void PipelineImpl::Seek(TimeDelta time, const PipelineStatusCB& seek_cb) {
- base::AutoLock auto_lock(lock_);
- if (running_) {
- message_loop_->PostTask(
- FROM_HERE, base::Bind(&PipelineImpl::SeekTask, this, time, seek_cb));
- } else {
-    // This callback will be silently ignored if there is a reload, as the
-    // PipelineImpl will be killed in that case. This is acceptable since the
-    // app needn't rely on this callback.
- message_loop_->PostTask(
- FROM_HERE, base::Bind(seek_cb, PIPELINE_ERROR_INVALID_STATE));
- }
-}
-
-bool PipelineImpl::IsRunning() const {
- base::AutoLock auto_lock(lock_);
- return running_;
-}
-
-bool PipelineImpl::HasAudio() const {
- base::AutoLock auto_lock(lock_);
- return has_audio_;
-}
-
-bool PipelineImpl::HasVideo() const {
- base::AutoLock auto_lock(lock_);
- return has_video_;
-}
-
-float PipelineImpl::GetPlaybackRate() const {
- base::AutoLock auto_lock(lock_);
- return playback_rate_;
-}
-
-void PipelineImpl::SetPlaybackRate(float playback_rate) {
- if (playback_rate < 0.0f)
- return;
-
- base::AutoLock auto_lock(lock_);
- playback_rate_ = playback_rate;
- if (running_) {
- message_loop_->PostTask(
- FROM_HERE, base::Bind(&PipelineImpl::PlaybackRateChangedTask, this,
- playback_rate));
- }
-}
-
-float PipelineImpl::GetVolume() const {
- base::AutoLock auto_lock(lock_);
- return volume_;
-}
-
-void PipelineImpl::SetVolume(float volume) {
- if (volume < 0.0f || volume > 1.0f)
- return;
-
- base::AutoLock auto_lock(lock_);
- volume_ = volume;
- if (running_) {
- message_loop_->PostTask(
- FROM_HERE, base::Bind(&PipelineImpl::VolumeChangedTask, this, volume));
- }
-}
-
-TimeDelta PipelineImpl::GetMediaTime() const {
- base::AutoLock auto_lock(lock_);
- return clock_->Elapsed();
-}
-
-Ranges<TimeDelta> PipelineImpl::GetBufferedTimeRanges() {
- base::AutoLock auto_lock(lock_);
- Ranges<TimeDelta> time_ranges;
- for (size_t i = 0; i < buffered_time_ranges_.size(); ++i) {
- time_ranges.Add(buffered_time_ranges_.start(i),
- buffered_time_ranges_.end(i));
- }
- if (clock_->Duration() == TimeDelta() || total_bytes_ == 0)
- return time_ranges;
- for (size_t i = 0; i < buffered_byte_ranges_.size(); ++i) {
- TimeDelta start = TimeForByteOffset_Locked(buffered_byte_ranges_.start(i));
- TimeDelta end = TimeForByteOffset_Locked(buffered_byte_ranges_.end(i));
- // Cap approximated buffered time at the length of the video.
- end = std::min(end, clock_->Duration());
- time_ranges.Add(start, end);
- }
-
- return time_ranges;
-}
-
-TimeDelta PipelineImpl::GetMediaDuration() const {
- base::AutoLock auto_lock(lock_);
- return clock_->Duration();
-}
-
-int64 PipelineImpl::GetTotalBytes() const {
- base::AutoLock auto_lock(lock_);
- return total_bytes_;
-}
-
-void PipelineImpl::GetNaturalVideoSize(gfx::Size* out_size) const {
- CHECK(out_size);
- base::AutoLock auto_lock(lock_);
- *out_size = natural_size_;
-}
-
-bool PipelineImpl::DidLoadingProgress() const {
- base::AutoLock auto_lock(lock_);
- bool ret = did_loading_progress_;
- did_loading_progress_ = false;
- return ret;
-}
-
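Because DidLoadingProgress() consumes the flag on every call, a client normally polls it on its own cadence, roughly as in this illustrative snippet (UpdateBufferedUi() is a hypothetical helper):

  // Polled from a periodic progress timer on the client side.
  if (pipeline->DidLoadingProgress())
    UpdateBufferedUi(pipeline->GetBufferedTimeRanges());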
-PipelineStatistics PipelineImpl::GetStatistics() const {
- base::AutoLock auto_lock(lock_);
- return statistics_;
-}
-
-void PipelineImpl::SetClockForTesting(Clock* clock) {
- clock_.reset(clock);
-}
-
-void PipelineImpl::SetErrorForTesting(PipelineStatus status) {
- SetError(status);
-}
-
-void PipelineImpl::SetState(State next_state) {
- if (state_ != kStarted && next_state == kStarted &&
- !creation_time_.is_null()) {
- UMA_HISTOGRAM_TIMES(
- "Media.TimeToPipelineStarted", base::Time::Now() - creation_time_);
- creation_time_ = base::Time();
- }
-
- DVLOG(2) << GetStateString(state_) << " -> " << GetStateString(next_state);
-
- state_ = next_state;
- media_log_->AddEvent(
- media_log_->CreatePipelineStateChangedEvent(GetStateString(next_state)));
-}
-
-#define RETURN_STRING(state) case state: return #state;
-
-const char* PipelineImpl::GetStateString(State state) {
- switch (state) {
- RETURN_STRING(kCreated);
- RETURN_STRING(kInitDemuxer);
- RETURN_STRING(kInitAudioRenderer);
- RETURN_STRING(kInitVideoRenderer);
- RETURN_STRING(kInitPrerolling);
- RETURN_STRING(kSeeking);
- RETURN_STRING(kStarting);
- RETURN_STRING(kStarted);
- RETURN_STRING(kStopping);
- RETURN_STRING(kStopped);
- }
- NOTREACHED();
- return "INVALID";
-}
-
-#undef RETURN_STRING
-
-PipelineImpl::State PipelineImpl::GetNextState() const {
- DCHECK(message_loop_->BelongsToCurrentThread());
- DCHECK(stop_cb_.is_null())
- << "State transitions don't happen when stopping";
- DCHECK_EQ(status_, PIPELINE_OK)
- << "State transitions don't happen when there's an error: " << status_;
-
- switch (state_) {
- case kCreated:
- return kInitDemuxer;
-
- case kInitDemuxer:
- if (demuxer_->GetStream(DemuxerStream::AUDIO))
- return kInitAudioRenderer;
- if (demuxer_->GetStream(DemuxerStream::VIDEO))
- return kInitVideoRenderer;
- return kInitPrerolling;
-
- case kInitAudioRenderer:
- if (demuxer_->GetStream(DemuxerStream::VIDEO))
- return kInitVideoRenderer;
- return kInitPrerolling;
-
- case kInitVideoRenderer:
- return kInitPrerolling;
-
- case kInitPrerolling:
- return kStarting;
-
- case kSeeking:
- return kStarting;
-
- case kStarting:
- return kStarted;
-
- case kStarted:
- case kStopping:
- case kStopped:
- break;
- }
- NOTREACHED() << "State has no transition: " << state_;
- return state_;
-}
-
-void PipelineImpl::OnDemuxerError(PipelineStatus error) {
- SetError(error);
-}
-
-void PipelineImpl::SetError(PipelineStatus error) {
- DCHECK(IsRunning());
- DCHECK_NE(PIPELINE_OK, error);
- VLOG(1) << "Media pipeline error: " << error;
-
- message_loop_->PostTask(
- FROM_HERE, base::Bind(&PipelineImpl::ErrorChangedTask, this, error));
-
- media_log_->AddEvent(media_log_->CreatePipelineErrorEvent(error));
-}
-
-void PipelineImpl::OnAudioDisabled() {
- DCHECK(IsRunning());
- message_loop_->PostTask(FROM_HERE,
- base::Bind(&PipelineImpl::AudioDisabledTask, this));
- media_log_->AddEvent(
- media_log_->CreateEvent(MediaLogEvent::AUDIO_RENDERER_DISABLED));
-}
-
-void PipelineImpl::OnAudioTimeUpdate(TimeDelta time, TimeDelta max_time) {
- DCHECK_LE(time.InMicroseconds(), max_time.InMicroseconds());
- DCHECK(IsRunning());
- base::AutoLock auto_lock(lock_);
-
- if (!has_audio_)
- return;
- if (waiting_for_clock_update_ && time < clock_->Elapsed())
- return;
-
- // TODO(scherkus): |state_| should only be accessed on pipeline thread, see
- // http://crbug.com/137973
- if (state_ == kSeeking)
- return;
-
- clock_->SetTime(time, max_time);
- StartClockIfWaitingForTimeUpdate_Locked();
-}
-
-void PipelineImpl::OnVideoTimeUpdate(TimeDelta max_time) {
- DCHECK(IsRunning());
- base::AutoLock auto_lock(lock_);
-
- if (has_audio_)
- return;
-
- // TODO(scherkus): |state_| should only be accessed on pipeline thread, see
- // http://crbug.com/137973
- if (state_ == kSeeking)
- return;
-
- DCHECK(!waiting_for_clock_update_);
- clock_->SetMaxTime(max_time);
-}
-
-void PipelineImpl::SetDuration(TimeDelta duration) {
- media_log_->AddEvent(
- media_log_->CreateTimeEvent(
- MediaLogEvent::DURATION_SET, "duration", duration));
- UMA_HISTOGRAM_LONG_TIMES("Media.Duration", duration);
-
- base::AutoLock auto_lock(lock_);
- clock_->SetDuration(duration);
- if (!duration_change_cb_.is_null())
- duration_change_cb_.Run();
-}
-
-void PipelineImpl::SetTotalBytes(int64 total_bytes) {
- DCHECK(IsRunning());
- media_log_->AddEvent(
- media_log_->CreateStringEvent(
- MediaLogEvent::TOTAL_BYTES_SET, "total_bytes",
- base::Int64ToString(total_bytes)));
- int64 total_mbytes = total_bytes >> 20;
- if (total_mbytes > kint32max)
- total_mbytes = kint32max;
- UMA_HISTOGRAM_CUSTOM_COUNTS(
- "Media.TotalMBytes", static_cast<int32>(total_mbytes), 1, kint32max, 50);
-
- base::AutoLock auto_lock(lock_);
- total_bytes_ = total_bytes;
-}
-
-TimeDelta PipelineImpl::TimeForByteOffset_Locked(int64 byte_offset) const {
- lock_.AssertAcquired();
- TimeDelta time_offset = byte_offset * clock_->Duration() / total_bytes_;
- // Since the byte->time calculation is approximate, fudge the beginning &
- // ending areas to look better.
- TimeDelta epsilon = clock_->Duration() / 100;
- if (time_offset < epsilon)
- return TimeDelta();
- if (time_offset + epsilon > clock_->Duration())
- return clock_->Duration();
- return time_offset;
-}
-
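A small worked example of the clamping in TimeForByteOffset_Locked() above, with illustrative numbers only:

  // duration = 100 s, total_bytes_ = 1000  =>  epsilon = duration / 100 = 1 s
  //   byte_offset = 5    ->  0.5 s, below epsilon          -> reported as 0 s
  //   byte_offset = 500  -> 50.0 s                         -> reported as 50 s
  //   byte_offset = 996  -> 99.6 s; 99.6 s + 1 s > 100 s   -> clamped to 100 s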
-void PipelineImpl::OnStateTransition(PipelineStatus status) {
- // Force post to process state transitions after current execution frame.
- message_loop_->PostTask(
- FROM_HERE, base::Bind(&PipelineImpl::StateTransitionTask, this, status));
-}
-
-void PipelineImpl::StateTransitionTask(PipelineStatus status) {
- DCHECK(message_loop_->BelongsToCurrentThread());
-
- // No-op any state transitions if we're stopping.
- if (state_ == kStopping || state_ == kStopped)
- return;
-
- // Preserve existing abnormal status, otherwise update based on the result of
- // the previous operation.
- status_ = (status_ != PIPELINE_OK ? status_ : status);
-
- if (status_ != PIPELINE_OK) {
- ErrorChangedTask(status_);
- return;
- }
-
- // Guard against accidentally clearing |pending_callbacks_| for states that
- // use it as well as states that should not be using it.
- //
- // TODO(scherkus): Make every state transition use |pending_callbacks_|.
- DCHECK_EQ(pending_callbacks_.get() != NULL,
- (state_ == kInitPrerolling || state_ == kStarting ||
- state_ == kSeeking));
- pending_callbacks_.reset();
-
- PipelineStatusCB done_cb = base::Bind(&PipelineImpl::OnStateTransition, this);
-
- // Switch states, performing any entrance actions for the new state as well.
- SetState(GetNextState());
- switch (state_) {
- case kInitDemuxer:
- return InitializeDemuxer(done_cb);
-
- case kInitAudioRenderer:
- return InitializeAudioRenderer(done_cb);
-
- case kInitVideoRenderer:
- return InitializeVideoRenderer(done_cb);
-
- case kInitPrerolling:
- filter_collection_.reset();
- {
- base::AutoLock l(lock_);
- // We do not want to start the clock running. We only want to set the
- // base media time so our timestamp calculations will be correct.
- clock_->SetTime(demuxer_->GetStartTime(), demuxer_->GetStartTime());
-
- // TODO(scherkus): |has_audio_| should be true no matter what --
- // otherwise people with muted/disabled sound cards will make our
- // default controls look as if every video doesn't contain an audio
- // track.
- has_audio_ = audio_renderer_ != NULL && !audio_disabled_;
- has_video_ = video_renderer_ != NULL;
- }
- if (!audio_renderer_ && !video_renderer_) {
- done_cb.Run(PIPELINE_ERROR_COULD_NOT_RENDER);
- return;
- }
-
- buffering_state_cb_.Run(kHaveMetadata);
-
- return DoInitialPreroll(done_cb);
-
- case kStarting:
- return DoPlay(done_cb);
-
- case kStarted:
-#if defined(__LB_SHELL__) || defined(COBALT)
- ShellMediaStatistics::Instance().OnPlaybackBegin();
-#endif // defined(__LB_SHELL__) || defined(COBALT)
- {
- base::AutoLock l(lock_);
-      // We use the audio stream to update the clock. So if there is such a
- // stream, we pause the clock until we receive a valid timestamp.
- waiting_for_clock_update_ = true;
- if (!has_audio_) {
- clock_->SetMaxTime(clock_->Duration());
- StartClockIfWaitingForTimeUpdate_Locked();
- }
- }
-
- DCHECK(!seek_cb_.is_null());
- DCHECK_EQ(status_, PIPELINE_OK);
-
- // Fire canplaythrough immediately after playback begins because of
- // crbug.com/106480.
- // TODO(vrk): set ready state to HaveFutureData when bug above is fixed.
- buffering_state_cb_.Run(kPrerollCompleted);
- return base::ResetAndReturn(&seek_cb_).Run(PIPELINE_OK);
-
- case kStopping:
- case kStopped:
- case kCreated:
- case kSeeking:
- NOTREACHED() << "State has no transition: " << state_;
- return;
- }
-}
-
-void PipelineImpl::DoInitialPreroll(const PipelineStatusCB& done_cb) {
- DCHECK(message_loop_->BelongsToCurrentThread());
- DCHECK(!pending_callbacks_.get());
- SerialRunner::Queue bound_fns;
-
- base::TimeDelta seek_timestamp = demuxer_->GetStartTime();
-
- // Preroll renderers.
- if (audio_renderer_) {
- bound_fns.Push(base::Bind(
- &AudioRenderer::Preroll, audio_renderer_, seek_timestamp));
- }
-
- if (video_renderer_) {
- bound_fns.Push(base::Bind(
- &VideoRenderer::Preroll, video_renderer_, seek_timestamp));
- }
-
- pending_callbacks_ = SerialRunner::Run(bound_fns, done_cb);
-}
-
-void PipelineImpl::DoSeek(base::TimeDelta seek_timestamp,
- const PipelineStatusCB& done_cb) {
- DCHECK(message_loop_->BelongsToCurrentThread());
- DCHECK(!pending_callbacks_.get());
- SerialRunner::Queue bound_fns;
-
- // Pause.
- if (audio_renderer_)
- bound_fns.Push(base::Bind(&AudioRenderer::Pause, audio_renderer_));
- if (video_renderer_)
- bound_fns.Push(base::Bind(&VideoRenderer::Pause, video_renderer_));
-
- // Flush.
- if (audio_renderer_)
- bound_fns.Push(base::Bind(&AudioRenderer::Flush, audio_renderer_));
- if (video_renderer_)
- bound_fns.Push(base::Bind(&VideoRenderer::Flush, video_renderer_));
-
- // Seek demuxer.
- bound_fns.Push(base::Bind(
- &Demuxer::Seek, demuxer_, seek_timestamp));
-
- // Preroll renderers.
- if (audio_renderer_) {
- bound_fns.Push(base::Bind(
- &AudioRenderer::Preroll, audio_renderer_, seek_timestamp));
- }
-
- if (video_renderer_) {
- bound_fns.Push(base::Bind(
- &VideoRenderer::Preroll, video_renderer_, seek_timestamp));
- }
-
- pending_callbacks_ = SerialRunner::Run(bound_fns, done_cb);
-}
-
-void PipelineImpl::DoPlay(const PipelineStatusCB& done_cb) {
- DCHECK(message_loop_->BelongsToCurrentThread());
- DCHECK(!pending_callbacks_.get());
- SerialRunner::Queue bound_fns;
-
- PlaybackRateChangedTask(GetPlaybackRate());
- VolumeChangedTask(GetVolume());
-
- if (audio_renderer_)
- bound_fns.Push(base::Bind(&AudioRenderer::Play, audio_renderer_));
-
- if (video_renderer_)
- bound_fns.Push(base::Bind(&VideoRenderer::Play, video_renderer_));
-
- pending_callbacks_ = SerialRunner::Run(bound_fns, done_cb);
-}
-
-void PipelineImpl::DoStop(const PipelineStatusCB& done_cb) {
- DCHECK(message_loop_->BelongsToCurrentThread());
- DCHECK(!pending_callbacks_.get());
- SerialRunner::Queue bound_fns;
-
- if (demuxer_)
- bound_fns.Push(base::Bind(&Demuxer::Stop, demuxer_));
-
- if (audio_renderer_)
- bound_fns.Push(base::Bind(&AudioRenderer::Stop, audio_renderer_));
-
- if (video_renderer_)
- bound_fns.Push(base::Bind(&VideoRenderer::Stop, video_renderer_));
-
- pending_callbacks_ = SerialRunner::Run(bound_fns, done_cb);
-}
-
-void PipelineImpl::OnStopCompleted(PipelineStatus status) {
- DCHECK(message_loop_->BelongsToCurrentThread());
- DCHECK_EQ(state_, kStopping);
- {
- base::AutoLock l(lock_);
- running_ = false;
- }
-
- SetState(kStopped);
- pending_callbacks_.reset();
- filter_collection_.reset();
- audio_renderer_ = NULL;
- video_renderer_ = NULL;
- demuxer_ = NULL;
-
- // If we stop during initialization/seeking we want to run |seek_cb_|
- // followed by |stop_cb_| so we don't leave outstanding callbacks around.
- if (!seek_cb_.is_null()) {
- base::ResetAndReturn(&seek_cb_).Run(status_);
- error_cb_.Reset();
- }
- if (!stop_cb_.is_null()) {
- base::ResetAndReturn(&stop_cb_).Run();
- error_cb_.Reset();
- }
- if (!error_cb_.is_null()) {
- DCHECK_NE(status_, PIPELINE_OK);
- base::ResetAndReturn(&error_cb_).Run(status_);
- }
-}
-
-void PipelineImpl::AddBufferedByteRange(int64 start, int64 end) {
- DCHECK(IsRunning());
- base::AutoLock auto_lock(lock_);
- buffered_byte_ranges_.Add(start, end);
- did_loading_progress_ = true;
-}
-
-void PipelineImpl::AddBufferedTimeRange(base::TimeDelta start,
- base::TimeDelta end) {
- DCHECK(IsRunning());
- base::AutoLock auto_lock(lock_);
- buffered_time_ranges_.Add(start, end);
- did_loading_progress_ = true;
-}
-
-void PipelineImpl::OnNaturalVideoSizeChanged(const gfx::Size& size) {
- DCHECK(IsRunning());
- media_log_->AddEvent(media_log_->CreateVideoSizeSetEvent(
- size.width(), size.height()));
-
- base::AutoLock auto_lock(lock_);
- natural_size_ = size;
-}
-
-void PipelineImpl::OnAudioRendererEnded() {
- // Force post to process ended messages after current execution frame.
- message_loop_->PostTask(
- FROM_HERE, base::Bind(&PipelineImpl::DoAudioRendererEnded, this));
- media_log_->AddEvent(media_log_->CreateEvent(MediaLogEvent::AUDIO_ENDED));
-}
-
-void PipelineImpl::OnVideoRendererEnded() {
- // Force post to process ended messages after current execution frame.
- message_loop_->PostTask(
- FROM_HERE, base::Bind(&PipelineImpl::DoVideoRendererEnded, this));
- media_log_->AddEvent(media_log_->CreateEvent(MediaLogEvent::VIDEO_ENDED));
-}
-
-// Called from any thread.
-void PipelineImpl::OnUpdateStatistics(const PipelineStatistics& stats) {
- base::AutoLock auto_lock(lock_);
- statistics_.audio_bytes_decoded += stats.audio_bytes_decoded;
- statistics_.video_bytes_decoded += stats.video_bytes_decoded;
- statistics_.video_frames_decoded += stats.video_frames_decoded;
- statistics_.video_frames_dropped += stats.video_frames_dropped;
-}
-
-void PipelineImpl::StartTask(scoped_ptr<FilterCollection> filter_collection,
- const PipelineStatusCB& ended_cb,
- const PipelineStatusCB& error_cb,
- const PipelineStatusCB& seek_cb,
- const BufferingStateCB& buffering_state_cb,
- const base::Closure& duration_change_cb) {
- DCHECK(message_loop_->BelongsToCurrentThread());
- CHECK_EQ(kCreated, state_)
- << "Media pipeline cannot be started more than once";
-
- filter_collection_ = filter_collection.Pass();
- ended_cb_ = ended_cb;
- error_cb_ = error_cb;
- seek_cb_ = seek_cb;
- buffering_state_cb_ = buffering_state_cb;
- duration_change_cb_ = duration_change_cb;
-
- StateTransitionTask(PIPELINE_OK);
-}
-
-void PipelineImpl::StopTask(const base::Closure& stop_cb) {
- DCHECK(message_loop_->BelongsToCurrentThread());
- DCHECK(stop_cb_.is_null());
-
- if (state_ == kStopped) {
- stop_cb.Run();
- return;
- }
-
- stop_cb_ = stop_cb;
-
- // We may already be stopping due to a runtime error.
- if (state_ == kStopping) {
- return;
- }
-
- SetState(kStopping);
- pending_callbacks_.reset();
- DoStop(base::Bind(&PipelineImpl::OnStopCompleted, this));
-}
-
-void PipelineImpl::ErrorChangedTask(PipelineStatus error) {
- DCHECK(message_loop_->BelongsToCurrentThread());
- DCHECK_NE(PIPELINE_OK, error) << "PIPELINE_OK isn't an error!";
-
- if (state_ == kStopping || state_ == kStopped)
- return;
-
- SetState(kStopping);
- pending_callbacks_.reset();
- status_ = error;
-
- DoStop(base::Bind(&PipelineImpl::OnStopCompleted, this));
-}
-
-void PipelineImpl::PlaybackRateChangedTask(float playback_rate) {
- DCHECK(message_loop_->BelongsToCurrentThread());
-
- // Playback rate changes are only carried out while playing.
- if (state_ != kStarting && state_ != kStarted)
- return;
-
- {
- base::AutoLock auto_lock(lock_);
- clock_->SetPlaybackRate(playback_rate);
- }
-
- if (demuxer_)
- demuxer_->SetPlaybackRate(playback_rate);
- if (audio_renderer_)
- audio_renderer_->SetPlaybackRate(playback_rate_);
- if (video_renderer_)
- video_renderer_->SetPlaybackRate(playback_rate_);
-}
-
-void PipelineImpl::VolumeChangedTask(float volume) {
- DCHECK(message_loop_->BelongsToCurrentThread());
-
- // Volume changes are only carried out while playing.
- if (state_ != kStarting && state_ != kStarted)
- return;
-
- if (audio_renderer_)
- audio_renderer_->SetVolume(volume);
-}
-
-void PipelineImpl::SeekTask(TimeDelta time, const PipelineStatusCB& seek_cb) {
- DCHECK(message_loop_->BelongsToCurrentThread());
- DCHECK(stop_cb_.is_null());
-
- // Suppress seeking if we're not fully started.
- if (state_ != kStarted) {
- DCHECK(state_ == kStopping || state_ == kStopped)
-        << "Received extra seek in unexpected state: " << state_;
-
- // TODO(scherkus): should we run the callback? I'm tempted to say the API
- // will only execute the first Seek() request.
- DVLOG(1) << "Media pipeline has not started, ignoring seek to "
- << time.InMicroseconds() << " (current state: " << state_ << ")";
- return;
- }
-
- DCHECK(seek_cb_.is_null());
-
- SetState(kSeeking);
- base::TimeDelta seek_timestamp = std::max(time, demuxer_->GetStartTime());
- seek_cb_ = seek_cb;
- audio_ended_ = false;
- video_ended_ = false;
-
- // Kick off seeking!
- {
- base::AutoLock auto_lock(lock_);
- if (clock_->IsPlaying())
- clock_->Pause();
- waiting_for_clock_update_ = false;
- clock_->SetTime(seek_timestamp, seek_timestamp);
- }
- DoSeek(seek_timestamp, base::Bind(&PipelineImpl::OnStateTransition, this));
-}
-
-void PipelineImpl::DoAudioRendererEnded() {
- DCHECK(message_loop_->BelongsToCurrentThread());
-
- if (state_ != kStarted)
- return;
-
- DCHECK(!audio_ended_);
- audio_ended_ = true;
-
- // Start clock since there is no more audio to trigger clock updates.
- if (!audio_disabled_) {
- base::AutoLock auto_lock(lock_);
- clock_->SetMaxTime(clock_->Duration());
- StartClockIfWaitingForTimeUpdate_Locked();
- }
-
- RunEndedCallbackIfNeeded();
-}
-
-void PipelineImpl::DoVideoRendererEnded() {
- DCHECK(message_loop_->BelongsToCurrentThread());
-
- if (state_ != kStarted)
- return;
-
- DCHECK(!video_ended_);
- video_ended_ = true;
-
- RunEndedCallbackIfNeeded();
-}
-
-void PipelineImpl::RunEndedCallbackIfNeeded() {
- DCHECK(message_loop_->BelongsToCurrentThread());
-
- if (audio_renderer_ && !audio_ended_ && !audio_disabled_)
- return;
-
- if (video_renderer_ && !video_ended_)
- return;
-
- {
- base::AutoLock auto_lock(lock_);
- clock_->EndOfStream();
- }
-
- DLOG(INFO) << "video playback completed successfully! :)";
-
- // TODO(scherkus): Change |ended_cb_| into a Closure.
- DCHECK_EQ(status_, PIPELINE_OK);
- ended_cb_.Run(status_);
-}
-
-void PipelineImpl::AudioDisabledTask() {
- DCHECK(message_loop_->BelongsToCurrentThread());
-
- base::AutoLock auto_lock(lock_);
- has_audio_ = false;
- audio_disabled_ = true;
-
- // Notify our demuxer that we're no longer rendering audio.
- demuxer_->OnAudioRendererDisabled();
-
- // Start clock since there is no more audio to trigger clock updates.
- clock_->SetMaxTime(clock_->Duration());
- StartClockIfWaitingForTimeUpdate_Locked();
-}
-
-void PipelineImpl::InitializeDemuxer(const PipelineStatusCB& done_cb) {
- DCHECK(message_loop_->BelongsToCurrentThread());
-
- demuxer_ = filter_collection_->GetDemuxer();
- demuxer_->Initialize(this, done_cb);
-}
-
-void PipelineImpl::InitializeAudioRenderer(const PipelineStatusCB& done_cb) {
- DCHECK(message_loop_->BelongsToCurrentThread());
-
- scoped_refptr<DemuxerStream> stream =
- demuxer_->GetStream(DemuxerStream::AUDIO);
- DCHECK(stream);
-
- filter_collection_->SelectAudioRenderer(&audio_renderer_);
- audio_renderer_->Initialize(
- stream, *filter_collection_->GetAudioDecoders(), done_cb,
- base::Bind(&PipelineImpl::OnUpdateStatistics, this),
- base::Bind(&PipelineImpl::OnAudioUnderflow, this),
- base::Bind(&PipelineImpl::OnAudioTimeUpdate, this),
- base::Bind(&PipelineImpl::OnAudioRendererEnded, this),
- base::Bind(&PipelineImpl::OnAudioDisabled, this),
- base::Bind(&PipelineImpl::SetError, this));
- filter_collection_->GetAudioDecoders()->clear();
-}
-
-void PipelineImpl::InitializeVideoRenderer(const PipelineStatusCB& done_cb) {
- DCHECK(message_loop_->BelongsToCurrentThread());
-
- scoped_refptr<DemuxerStream> stream =
- demuxer_->GetStream(DemuxerStream::VIDEO);
- DCHECK(stream);
-
- {
- // Get an initial natural size so we have something when we signal
- // the kHaveMetadata buffering state.
- base::AutoLock l(lock_);
- natural_size_ = stream->video_decoder_config().natural_size();
- }
-
- filter_collection_->SelectVideoRenderer(&video_renderer_);
- video_renderer_->Initialize(
- stream, *filter_collection_->GetVideoDecoders(), done_cb,
- base::Bind(&PipelineImpl::OnUpdateStatistics, this),
- base::Bind(&PipelineImpl::OnVideoTimeUpdate, this),
- base::Bind(&PipelineImpl::OnNaturalVideoSizeChanged, this),
- base::Bind(&PipelineImpl::OnVideoRendererEnded, this),
- base::Bind(&PipelineImpl::SetError, this),
- base::Bind(&PipelineImpl::GetMediaTime, this),
- base::Bind(&PipelineImpl::GetMediaDuration, this));
- filter_collection_->GetVideoDecoders()->clear();
-}
-
-void PipelineImpl::OnAudioUnderflow() {
- if (!message_loop_->BelongsToCurrentThread()) {
- message_loop_->PostTask(FROM_HERE,
- base::Bind(&PipelineImpl::OnAudioUnderflow, this));
- return;
- }
-
- if (state_ != kStarted)
- return;
-
- if (audio_renderer_)
- audio_renderer_->ResumeAfterUnderflow(true);
-}
-
-void PipelineImpl::StartClockIfWaitingForTimeUpdate_Locked() {
- lock_.AssertAcquired();
- if (!waiting_for_clock_update_)
- return;
-
- waiting_for_clock_update_ = false;
- clock_->Play();
-}
-
-} // namespace media
diff --git a/src/media/base/pipeline_impl.h b/src/media/base/pipeline_impl.h
deleted file mode 100644
index 8b1983f..0000000
--- a/src/media/base/pipeline_impl.h
+++ /dev/null
@@ -1,466 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef MEDIA_BASE_PIPELINE_IMPL_H_
-#define MEDIA_BASE_PIPELINE_IMPL_H_
-
-#include "base/callback.h"
-#include "base/gtest_prod_util.h"
-#include "base/synchronization/condition_variable.h"
-#include "base/synchronization/lock.h"
-#include "base/threading/thread_checker.h"
-#include "media/base/audio_renderer.h"
-#include "media/base/demuxer.h"
-#include "media/base/media_export.h"
-#include "media/base/pipeline.h"
-#include "media/base/pipeline_status.h"
-#include "media/base/ranges.h"
-#include "media/base/serial_runner.h"
-
-class MessageLoop;
-
-namespace base {
-class MessageLoopProxy;
-class TimeDelta;
-}
-
-namespace media {
-
-class Clock;
-class FilterCollection;
-class MediaLog;
-class VideoRenderer;
-
-// Adapter for using asynchronous Pipeline methods in code that wants to run
-// synchronously. To use, construct an instance of this class and pass the
-// |Callback()| to the Pipeline method requiring a callback. Then Wait() for
-// the callback to get fired and call status() to see what the callback's
-// argument was. This object is for one-time use; call |Callback()| exactly
-// once.
-class MEDIA_EXPORT PipelineStatusNotification {
- public:
- PipelineStatusNotification();
- ~PipelineStatusNotification();
-
- // See class-level comment for usage.
- PipelineStatusCB Callback();
- void Wait();
- PipelineStatus status();
-
- private:
- void Notify(media::PipelineStatus status);
-
- base::Lock lock_;
- base::ConditionVariable cv_;
- media::PipelineStatus status_;
- bool notified_;
-
- DISALLOW_COPY_AND_ASSIGN(PipelineStatusNotification);
-};
-
-// Pipeline runs the media pipeline. Filters are created and called on the
-// message loop injected into this object. Pipeline works like a state
-// machine to perform asynchronous initialization, pausing, seeking and playing.
-//
-// Here's a state diagram that describes the lifetime of this object.
-//
-// [ *Created ] [ Any State ]
-// | Start() | Stop() / SetError()
-// V V
-// [ InitXXX (for each filter) ] [ Stopping ]
-// | |
-// V V
-// [ InitPreroll ] [ Stopped ]
-// |
-// V
-// [ Starting ] <-- [ Seeking ]
-// | ^
-// V |
-// [ Started ] ----------'
-// Seek()
-//
-// Initialization is a series of state transitions from "Created" through each
-// filter initialization state. When all filter initialization states have
-// completed, we are implicitly in a "Paused" state. At that point we simulate
-// a Seek() to the beginning of the media to give filters a chance to preroll.
-// From then on the normal Seek() transitions are carried out and we start
-// playing the media.
-//
-// If any error ever happens, this object will transition to the "Error" state
-// from any state. If Stop() is ever called, this object will transition to
-// "Stopped" state.
-class MEDIA_EXPORT PipelineImpl : public Pipeline, public DemuxerHost {
- public:
- // Constructs a media pipeline that will execute on |message_loop|.
- PipelineImpl(const scoped_refptr<base::MessageLoopProxy>& message_loop,
- MediaLog* media_log);
-
- void Suspend() OVERRIDE;
- void Resume() OVERRIDE;
-
-  // Build a pipeline using the given filter collection to construct a filter
- // chain, executing |seek_cb| when the initial seek/preroll has completed.
- //
- // |filter_collection| must be a complete collection containing a demuxer,
- // audio/video decoders, and audio/video renderers. Failing to do so will
- // result in a crash.
- //
- // The following permanent callbacks will be executed as follows up until
- // Stop() has completed:
- // |decryptor_ready_cb| can be used if Pipeline needs to be notified when
- // the Decryptor is ready.
- // |ended_cb| will be executed whenever the media reaches the end.
- // |error_cb| will be executed whenever an error occurs but hasn't
- // been reported already through another callback.
- // |buffering_state_cb| Optional callback that will be executed whenever the
- // pipeline's buffering state changes.
-  //   |duration_change_cb| Optional callback that will be executed whenever the
- // presentation duration changes.
- // It is an error to call this method after the pipeline has already started.
- void Start(scoped_ptr<FilterCollection> filter_collection,
- const SetDecryptorReadyCB& decryptor_ready_cb,
- const PipelineStatusCB& ended_cb,
- const PipelineStatusCB& error_cb,
- const PipelineStatusCB& seek_cb,
- const BufferingStateCB& buffering_state_cb,
- const base::Closure& duration_change_cb) OVERRIDE;
-
- // Asynchronously stops the pipeline, executing |stop_cb| when the pipeline
- // teardown has completed.
- //
-  // Stop() must complete before destroying the pipeline. It is permissible to
- // call Stop() at any point during the lifetime of the pipeline.
- void Stop(const base::Closure& stop_cb) OVERRIDE;
-
- // Attempt to seek to the position specified by time. |seek_cb| will be
-  // executed when all filters in the pipeline have processed the seek.
- //
- // Clients are expected to call GetMediaTime() to check whether the seek
- // succeeded.
- //
- // It is an error to call this method if the pipeline has not started.
- void Seek(base::TimeDelta time, const PipelineStatusCB& seek_cb) OVERRIDE;
-
- // Returns true if the pipeline has been started via Start(). If IsRunning()
- // returns true, it is expected that Stop() will be called before destroying
- // the pipeline.
- bool IsRunning() const;
-
- // Returns true if the media has audio.
- bool HasAudio() const OVERRIDE;
-
- // Returns true if the media has video.
- bool HasVideo() const OVERRIDE;
-
- // Gets the current playback rate of the pipeline. When the pipeline is
- // started, the playback rate will be 0.0f. A rate of 1.0f indicates
- // that the pipeline is rendering the media at the standard rate. Valid
- // values for playback rate are >= 0.0f.
- float GetPlaybackRate() const OVERRIDE;
-
- // Attempt to adjust the playback rate. Setting a playback rate of 0.0f pauses
- // all rendering of the media. A rate of 1.0f indicates a normal playback
- // rate. Values for the playback rate must be greater than or equal to 0.0f.
- //
- // TODO(scherkus): What about maximum rate? Does HTML5 specify a max?
- void SetPlaybackRate(float playback_rate) OVERRIDE;
-
- // Gets the current volume setting being used by the audio renderer. When
- // the pipeline is started, this value will be 1.0f. Valid values range
- // from 0.0f to 1.0f.
- float GetVolume() const OVERRIDE;
-
- // Attempt to set the volume of the audio renderer. Valid values for volume
- // range from 0.0f (muted) to 1.0f (full volume). This value affects all
- // channels proportionately for multi-channel audio streams.
- void SetVolume(float volume) OVERRIDE;
-
- // Returns the current media playback time, which progresses from 0 until
- // GetMediaDuration().
- base::TimeDelta GetMediaTime() const OVERRIDE;
-
- // Get approximate time ranges of buffered media.
- Ranges<base::TimeDelta> GetBufferedTimeRanges() OVERRIDE;
-
-  // Get the duration of the media. If the duration has not been determined
-  // yet, this returns a zero base::TimeDelta.
- base::TimeDelta GetMediaDuration() const OVERRIDE;
-
-  // Get the total size of the media file. If the size has not yet been
-  // determined or cannot be determined, this value is 0.
- int64 GetTotalBytes() const OVERRIDE;
-
- // Gets the natural size of the video output in pixel units. If there is no
- // video or the video has not been rendered yet, the width and height will
- // be 0.
- void GetNaturalVideoSize(gfx::Size* out_size) const OVERRIDE;
-
- // Return true if loading progress has been made since the last time this
- // method was called.
- bool DidLoadingProgress() const OVERRIDE;
-
- // Gets the current pipeline statistics.
- PipelineStatistics GetStatistics() const OVERRIDE;
-
- void SetClockForTesting(Clock* clock);
- void SetErrorForTesting(PipelineStatus status);
-
- private:
- FRIEND_TEST_ALL_PREFIXES(PipelineTest, GetBufferedTimeRanges);
- FRIEND_TEST_ALL_PREFIXES(PipelineTest, DisableAudioRenderer);
- FRIEND_TEST_ALL_PREFIXES(PipelineTest, DisableAudioRendererDuringInit);
- FRIEND_TEST_ALL_PREFIXES(PipelineTest, EndedCallback);
- FRIEND_TEST_ALL_PREFIXES(PipelineTest, AudioStreamShorterThanVideo);
- friend class MediaLog;
-
- virtual ~PipelineImpl();
-
- // PipelineImpl states, as described above.
- enum State {
- kCreated,
- kInitDemuxer,
- kInitAudioRenderer,
- kInitVideoRenderer,
- kInitPrerolling,
- kSeeking,
- kStarting,
- kStarted,
- kStopping,
- kStopped,
- };
-
- // Updates |state_|. All state transitions should use this call.
- void SetState(State next_state);
-
- static const char* GetStateString(State state);
- State GetNextState() const;
-
- // Helper method that runs & resets |seek_cb_| and resets |seek_timestamp_|
- // and |seek_pending_|.
- void FinishSeek();
-
- // DataSourceHost (by way of DemuxerHost) implementation.
- virtual void SetTotalBytes(int64 total_bytes) OVERRIDE;
- virtual void AddBufferedByteRange(int64 start, int64 end) OVERRIDE;
- virtual void AddBufferedTimeRange(base::TimeDelta start,
- base::TimeDelta end) OVERRIDE;
-
-  // DemuxerHost implementation.
- virtual void SetDuration(base::TimeDelta duration) OVERRIDE;
- virtual void OnDemuxerError(PipelineStatus error) OVERRIDE;
-
- // Initiates teardown sequence in response to a runtime error.
- //
- // Safe to call from any thread.
- void SetError(PipelineStatus error);
-
- // Callback executed when the natural size of the video has changed.
- void OnNaturalVideoSizeChanged(const gfx::Size& size);
-
- // Callbacks executed when a renderer has ended.
- void OnAudioRendererEnded();
- void OnVideoRendererEnded();
-
- // Callback executed by filters to update statistics.
- void OnUpdateStatistics(const PipelineStatistics& stats);
-
- // Callback executed by audio renderer when it has been disabled.
- void OnAudioDisabled();
-
- // Callback executed by audio renderer to update clock time.
- void OnAudioTimeUpdate(base::TimeDelta time, base::TimeDelta max_time);
-
- // Callback executed by video renderer to update clock time.
- void OnVideoTimeUpdate(base::TimeDelta max_time);
-
- // The following "task" methods correspond to the public methods, but these
-  // methods are run as the result of posting a task to the pipeline's
- // message loop.
- void StartTask(scoped_ptr<FilterCollection> filter_collection,
- const PipelineStatusCB& ended_cb,
- const PipelineStatusCB& error_cb,
- const PipelineStatusCB& seek_cb,
- const BufferingStateCB& buffering_state_cb,
- const base::Closure& duration_change_cb);
-
- // Stops and destroys all filters, placing the pipeline in the kStopped state.
- void StopTask(const base::Closure& stop_cb);
-
- // Carries out stopping and destroying all filters, placing the pipeline in
- // the kStopped state.
- void ErrorChangedTask(PipelineStatus error);
-
- // Carries out notifying filters that the playback rate has changed.
- void PlaybackRateChangedTask(float playback_rate);
-
- // Carries out notifying filters that the volume has changed.
- void VolumeChangedTask(float volume);
-
- // Carries out notifying filters that we are seeking to a new timestamp.
- void SeekTask(base::TimeDelta time, const PipelineStatusCB& seek_cb);
-
- // Handles audio/video ended logic and running |ended_cb_|.
- void DoAudioRendererEnded();
- void DoVideoRendererEnded();
- void RunEndedCallbackIfNeeded();
-
- // Carries out disabling the audio renderer.
- void AudioDisabledTask();
-
- // Kicks off initialization for each media object, executing |done_cb| with
- // the result when completed.
- void InitializeDemuxer(const PipelineStatusCB& done_cb);
- void InitializeAudioRenderer(const PipelineStatusCB& done_cb);
- void InitializeVideoRenderer(const PipelineStatusCB& done_cb);
-
- // Kicks off destroying filters. Called by StopTask() and ErrorChangedTask().
-  // When we start to tear down the pipeline, there are two cases:
-  // 1. If the pipeline has not been initialized, we transition to the
-  //    stopping state first.
-  // 2. If the pipeline has been initialized, we first transition through
-  //    pausing => flushing => stopping => stopped.
-  // This avoids a race condition between filters during stop.
- void TearDownPipeline();
-
- // Compute the time corresponding to a byte offset.
- base::TimeDelta TimeForByteOffset_Locked(int64 byte_offset) const;
-
- void OnStateTransition(PipelineStatus status);
- void StateTransitionTask(PipelineStatus status);
-
- // Initiates an asynchronous preroll call sequence executing |done_cb|
- // with the final status when completed.
- void DoInitialPreroll(const PipelineStatusCB& done_cb);
-
- // Initiates an asynchronous pause-flush-seek-preroll call sequence
- // executing |done_cb| with the final status when completed.
- //
- // TODO(scherkus): Prerolling should be separate from seeking so we can report
-  // finer-grained ready states (HAVE_CURRENT_DATA vs. HAVE_FUTURE_DATA)
-  // independent of seeking.
- void DoSeek(base::TimeDelta seek_timestamp, const PipelineStatusCB& done_cb);
-
- // Updates playback rate and volume and initiates an asynchronous play call
- // sequence executing |done_cb| with the final status when completed.
- void DoPlay(const PipelineStatusCB& done_cb);
-
- // Initiates an asynchronous pause-flush-stop call sequence executing
- // |done_cb| when completed.
- void DoStop(const PipelineStatusCB& done_cb);
- void OnStopCompleted(PipelineStatus status);
-
- void OnAudioUnderflow();
-
- void StartClockIfWaitingForTimeUpdate_Locked();
-
- // Message loop used to execute pipeline tasks.
- scoped_refptr<base::MessageLoopProxy> message_loop_;
-
- // MediaLog to which to log events.
- scoped_refptr<MediaLog> media_log_;
-
- // Lock used to serialize access for the following data members.
- mutable base::Lock lock_;
-
- // Whether or not the pipeline is running.
- bool running_;
-
- // Amount of available buffered data. Set by filters.
- Ranges<int64> buffered_byte_ranges_;
- Ranges<base::TimeDelta> buffered_time_ranges_;
-
- // True when AddBufferedByteRange() has been called more recently than
- // DidLoadingProgress().
- mutable bool did_loading_progress_;
-
- // Total size of the media. Set by filters.
- int64 total_bytes_;
-
- // Video's natural width and height. Set by filters.
- gfx::Size natural_size_;
-
- // Current volume level (from 0.0f to 1.0f). This value is set immediately
- // via SetVolume() and a task is dispatched on the message loop to notify the
- // filters.
- float volume_;
-
- // Current playback rate (>= 0.0f). This value is set immediately via
- // SetPlaybackRate() and a task is dispatched on the message loop to notify
- // the filters.
- float playback_rate_;
-
- // Reference clock. Keeps track of current playback time. Uses system
- // clock and linear interpolation, but can have its time manually set
- // by filters.
- scoped_ptr<Clock> clock_;
-
- // If this value is set to true, then |clock_| is paused and we are waiting
- // for an update of the clock greater than or equal to the elapsed time to
- // start the clock.
- bool waiting_for_clock_update_;
-
- // Status of the pipeline. Initialized to PIPELINE_OK which indicates that
- // the pipeline is operating correctly. Any other value indicates that the
- // pipeline is stopped or is stopping. Clients can call the Stop() method to
- // reset the pipeline state, and restore this to PIPELINE_OK.
- PipelineStatus status_;
-
- // Whether the media contains rendered audio and video streams.
- // TODO(fischman,scherkus): replace these with checks for
- // {audio,video}_decoder_ once extraction of {Audio,Video}Decoder from the
-  // Filter hierarchy is done.
- bool has_audio_;
- bool has_video_;
-
- // The following data members are only accessed by tasks posted to
- // |message_loop_|.
-
- // Member that tracks the current state.
- State state_;
-
- // Whether we've received the audio/video ended events.
- bool audio_ended_;
- bool video_ended_;
-
-  // Set to true in AudioDisabledTask().
- bool audio_disabled_;
-
- scoped_ptr<FilterCollection> filter_collection_;
-
- // Temporary callback used for Start() and Seek().
- PipelineStatusCB seek_cb_;
-
- // Temporary callback used for Stop().
- base::Closure stop_cb_;
-
- // Permanent callbacks passed in via Start().
- PipelineStatusCB ended_cb_;
- PipelineStatusCB error_cb_;
- BufferingStateCB buffering_state_cb_;
- base::Closure duration_change_cb_;
-
- // Renderer references used for setting the volume, playback rate, and
- // determining when playback has finished.
- scoped_refptr<AudioRenderer> audio_renderer_;
- scoped_refptr<VideoRenderer> video_renderer_;
-
- // Demuxer reference used for setting the preload value.
- scoped_refptr<Demuxer> demuxer_;
-
- PipelineStatistics statistics_;
-
- // Time of pipeline creation; is non-zero only until the pipeline first
- // reaches "kStarted", at which point it is used & zeroed out.
- base::Time creation_time_;
-
- scoped_ptr<SerialRunner> pending_callbacks_;
-
- base::ThreadChecker thread_checker_;
-
- DISALLOW_COPY_AND_ASSIGN(PipelineImpl);
-};
-
-} // namespace media
-
-#endif // MEDIA_BASE_PIPELINE_IMPL_H_
diff --git a/src/media/base/pipeline_impl_unittest.cc b/src/media/base/pipeline_impl_unittest.cc
deleted file mode 100644
index 59e743b..0000000
--- a/src/media/base/pipeline_impl_unittest.cc
+++ /dev/null
@@ -1,1215 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include <vector>
-
-#include "base/bind.h"
-#include "base/message_loop.h"
-#include "base/stl_util.h"
-#include "base/threading/simple_thread.h"
-#include "media/base/clock.h"
-#include "media/base/gmock_callback_support.h"
-#include "media/base/media_log.h"
-#include "media/base/mock_filters.h"
-#include "media/base/pipeline_impl.h"
-#include "media/base/test_helpers.h"
-#include "testing/gtest/include/gtest/gtest.h"
-#include "ui/gfx/size.h"
-
-using ::testing::_;
-using ::testing::DeleteArg;
-using ::testing::DoAll;
-// TODO(scherkus): Remove InSequence after refactoring PipelineImpl.
-using ::testing::InSequence;
-using ::testing::Invoke;
-using ::testing::InvokeWithoutArgs;
-using ::testing::Mock;
-using ::testing::NotNull;
-using ::testing::Return;
-using ::testing::ReturnRef;
-using ::testing::SaveArg;
-using ::testing::StrictMock;
-using ::testing::WithArg;
-
-namespace media {
-
-// Demuxer properties.
-static const int kTotalBytes = 1024;
-
-ACTION_P(SetDemuxerProperties, duration) {
- arg0->SetTotalBytes(kTotalBytes);
- arg0->SetDuration(duration);
-}
-
-ACTION_P2(Stop, pipeline, stop_cb) {
- pipeline->Stop(stop_cb);
-}
-
-ACTION_P2(SetError, pipeline, status) {
- pipeline->SetErrorForTesting(status);
-}
-
-// Used for setting expectations on pipeline callbacks. Using a StrictMock
-// also lets us test for missing callbacks.
-class CallbackHelper {
- public:
- CallbackHelper() {}
- virtual ~CallbackHelper() {}
-
- MOCK_METHOD1(OnStart, void(PipelineStatus));
- MOCK_METHOD1(OnSeek, void(PipelineStatus));
- MOCK_METHOD0(OnStop, void());
- MOCK_METHOD1(OnEnded, void(PipelineStatus));
- MOCK_METHOD1(OnError, void(PipelineStatus));
- MOCK_METHOD1(OnBufferingState, void(PipelineImpl::BufferingState));
-
- private:
- DISALLOW_COPY_AND_ASSIGN(CallbackHelper);
-};
-
-// TODO(scherkus): even though some filters are initialized on separate
-// threads these tests aren't flaky... why? It's because filters' Initialize()
-// is executed on |message_loop_| and the mock filters instantly call
-// InitializationComplete(), which keeps the pipeline humming along. If
-// either filters don't call InitializationComplete() immediately or filter
-// initialization is moved to a separate thread this test will become flaky.
-class PipelineTest : public ::testing::Test {
- public:
- PipelineTest()
- : pipeline_(new PipelineImpl(message_loop_.message_loop_proxy(),
- new MediaLog())) {
- mocks_.reset(new MockFilterCollection());
-
- // InitializeDemuxer() adds overriding expectations for expected non-NULL
- // streams.
- DemuxerStream* null_pointer = NULL;
- EXPECT_CALL(*mocks_->demuxer(), GetStream(_))
- .WillRepeatedly(Return(null_pointer));
-
- EXPECT_CALL(*mocks_->demuxer(), GetStartTime())
- .WillRepeatedly(Return(base::TimeDelta()));
- }
-
- virtual ~PipelineTest() {
- // Shutdown sequence.
- if (pipeline_->IsRunning()) {
- EXPECT_CALL(*mocks_->demuxer(), Stop(_))
- .WillOnce(RunClosure<0>());
-
- if (audio_stream_)
- EXPECT_CALL(*mocks_->audio_renderer(), Stop(_))
- .WillOnce(RunClosure<0>());
-
- if (video_stream_)
- EXPECT_CALL(*mocks_->video_renderer(), Stop(_))
- .WillOnce(RunClosure<0>());
- }
-
- // Expect a stop callback if we were started.
- EXPECT_CALL(callbacks_, OnStop());
- pipeline_->Stop(base::Bind(&CallbackHelper::OnStop,
- base::Unretained(&callbacks_)));
- message_loop_.RunUntilIdle();
-
- pipeline_ = NULL;
- mocks_.reset();
- }
-
- protected:
- // Sets up expectations to allow the demuxer to initialize.
- typedef std::vector<MockDemuxerStream*> MockDemuxerStreamVector;
- void InitializeDemuxer(MockDemuxerStreamVector* streams,
- const base::TimeDelta& duration) {
- EXPECT_CALL(*mocks_->demuxer(), Initialize(_, _))
- .WillOnce(DoAll(SetDemuxerProperties(duration),
- RunCallback<1>(PIPELINE_OK)));
-
- // Configure the demuxer to return the streams.
- for (size_t i = 0; i < streams->size(); ++i) {
- scoped_refptr<DemuxerStream> stream((*streams)[i]);
- EXPECT_CALL(*mocks_->demuxer(), GetStream(stream->type()))
- .WillRepeatedly(Return(stream));
- }
- }
-
- void InitializeDemuxer(MockDemuxerStreamVector* streams) {
- // Initialize with a default non-zero duration.
- InitializeDemuxer(streams, base::TimeDelta::FromSeconds(10));
- }
-
- StrictMock<MockDemuxerStream>* CreateStream(DemuxerStream::Type type) {
- StrictMock<MockDemuxerStream>* stream =
- new StrictMock<MockDemuxerStream>();
- EXPECT_CALL(*stream, type())
- .WillRepeatedly(Return(type));
- return stream;
- }
-
- // Sets up expectations to allow the video renderer to initialize.
- void InitializeVideoRenderer(const scoped_refptr<DemuxerStream>& stream) {
- EXPECT_CALL(*mocks_->video_renderer(),
- Initialize(stream, _, _, _, _, _, _, _, _, _))
- .WillOnce(RunCallback<2>(PIPELINE_OK));
- EXPECT_CALL(*mocks_->video_renderer(), SetPlaybackRate(0.0f));
-
- // Startup sequence.
- EXPECT_CALL(*mocks_->video_renderer(),
- Preroll(mocks_->demuxer()->GetStartTime(), _))
- .WillOnce(RunCallback<1>(PIPELINE_OK));
- EXPECT_CALL(*mocks_->video_renderer(), Play(_))
- .WillOnce(RunClosure<0>());
- }
-
- // Sets up expectations to allow the audio renderer to initialize.
- void InitializeAudioRenderer(const scoped_refptr<DemuxerStream>& stream,
- bool disable_after_init_cb) {
- if (disable_after_init_cb) {
- EXPECT_CALL(*mocks_->audio_renderer(),
- Initialize(stream, _, _, _, _, _, _, _, _))
- .WillOnce(DoAll(RunCallback<2>(PIPELINE_OK),
- WithArg<7>(RunClosure<0>()))); // |disabled_cb|.
- } else {
- EXPECT_CALL(*mocks_->audio_renderer(),
- Initialize(stream, _, _, _, _, _, _, _, _))
- .WillOnce(DoAll(SaveArg<5>(&audio_time_cb_),
- RunCallback<2>(PIPELINE_OK)));
- }
- }
-
- // Sets up expectations on the callback and initializes the pipeline. Called
-  // after tests have set expectations on any filters they wish to use.
- void InitializePipeline(PipelineStatus start_status) {
- EXPECT_CALL(callbacks_, OnStart(start_status));
-
- if (start_status == PIPELINE_OK) {
- EXPECT_CALL(callbacks_, OnBufferingState(PipelineImpl::kHaveMetadata));
- EXPECT_CALL(*mocks_->demuxer(), SetPlaybackRate(0.0f));
-
- if (audio_stream_) {
- EXPECT_CALL(*mocks_->audio_renderer(), SetPlaybackRate(0.0f));
- EXPECT_CALL(*mocks_->audio_renderer(), SetVolume(1.0f));
-
- // Startup sequence.
- EXPECT_CALL(*mocks_->audio_renderer(), Preroll(base::TimeDelta(), _))
- .WillOnce(RunCallback<1>(PIPELINE_OK));
- EXPECT_CALL(*mocks_->audio_renderer(), Play(_))
- .WillOnce(RunClosure<0>());
- }
- EXPECT_CALL(callbacks_,
- OnBufferingState(PipelineImpl::kPrerollCompleted));
- }
-
- pipeline_->Start(
- mocks_->Create().Pass(), SetDecryptorReadyCB(),
- base::Bind(&CallbackHelper::OnEnded, base::Unretained(&callbacks_)),
- base::Bind(&CallbackHelper::OnError, base::Unretained(&callbacks_)),
- base::Bind(&CallbackHelper::OnStart, base::Unretained(&callbacks_)),
- base::Bind(&CallbackHelper::OnBufferingState,
- base::Unretained(&callbacks_)),
- base::Closure());
- message_loop_.RunUntilIdle();
- }
-
- void CreateAudioStream() {
- audio_stream_ = CreateStream(DemuxerStream::AUDIO);
- }
-
- void CreateVideoStream() {
- video_stream_ = CreateStream(DemuxerStream::VIDEO);
- EXPECT_CALL(*video_stream_, video_decoder_config())
- .WillRepeatedly(ReturnRef(video_decoder_config_));
- }
-
- MockDemuxerStream* audio_stream() {
- return audio_stream_;
- }
-
- MockDemuxerStream* video_stream() {
- return video_stream_;
- }
-
- void ExpectSeek(const base::TimeDelta& seek_time) {
- // Every filter should receive a call to Seek().
- EXPECT_CALL(*mocks_->demuxer(), Seek(seek_time, _))
- .WillOnce(RunCallback<1>(PIPELINE_OK));
- EXPECT_CALL(*mocks_->demuxer(), SetPlaybackRate(_));
-
- if (audio_stream_) {
- EXPECT_CALL(*mocks_->audio_renderer(), Pause(_))
- .WillOnce(RunClosure<0>());
- EXPECT_CALL(*mocks_->audio_renderer(), Flush(_))
- .WillOnce(RunClosure<0>());
- EXPECT_CALL(*mocks_->audio_renderer(), Preroll(seek_time, _))
- .WillOnce(RunCallback<1>(PIPELINE_OK));
- EXPECT_CALL(*mocks_->audio_renderer(), SetPlaybackRate(_));
- EXPECT_CALL(*mocks_->audio_renderer(), SetVolume(_));
- EXPECT_CALL(*mocks_->audio_renderer(), Play(_))
- .WillOnce(RunClosure<0>());
- }
-
- if (video_stream_) {
- EXPECT_CALL(*mocks_->video_renderer(), Pause(_))
- .WillOnce(RunClosure<0>());
- EXPECT_CALL(*mocks_->video_renderer(), Flush(_))
- .WillOnce(RunClosure<0>());
- EXPECT_CALL(*mocks_->video_renderer(), Preroll(seek_time, _))
- .WillOnce(RunCallback<1>(PIPELINE_OK));
- EXPECT_CALL(*mocks_->video_renderer(), SetPlaybackRate(_));
- EXPECT_CALL(*mocks_->video_renderer(), Play(_))
- .WillOnce(RunClosure<0>());
- }
-
- EXPECT_CALL(callbacks_, OnBufferingState(PipelineImpl::kPrerollCompleted));
-
- // We expect a successful seek callback.
- EXPECT_CALL(callbacks_, OnSeek(PIPELINE_OK));
- }
-
- void DoSeek(const base::TimeDelta& seek_time) {
- pipeline_->Seek(seek_time,
- base::Bind(&CallbackHelper::OnSeek,
- base::Unretained(&callbacks_)));
-
- // We expect the time to be updated only after the seek has completed.
- EXPECT_NE(seek_time, pipeline_->GetMediaTime());
- message_loop_.RunUntilIdle();
- EXPECT_EQ(seek_time, pipeline_->GetMediaTime());
- }
-
- // Fixture members.
- StrictMock<CallbackHelper> callbacks_;
- MessageLoop message_loop_;
- scoped_refptr<PipelineImpl> pipeline_;
- scoped_ptr<media::MockFilterCollection> mocks_;
- scoped_refptr<StrictMock<MockDemuxerStream> > audio_stream_;
- scoped_refptr<StrictMock<MockDemuxerStream> > video_stream_;
- AudioRenderer::TimeCB audio_time_cb_;
- VideoDecoderConfig video_decoder_config_;
-
- private:
- DISALLOW_COPY_AND_ASSIGN(PipelineTest);
-};
-
-// Test that playback controls methods no-op when the pipeline hasn't been
-// started.
-TEST_F(PipelineTest, NotStarted) {
- const base::TimeDelta kZero;
-
- EXPECT_FALSE(pipeline_->IsRunning());
- EXPECT_FALSE(pipeline_->HasAudio());
- EXPECT_FALSE(pipeline_->HasVideo());
-
- // Setting should still work.
- EXPECT_EQ(0.0f, pipeline_->GetPlaybackRate());
- pipeline_->SetPlaybackRate(-1.0f);
- EXPECT_EQ(0.0f, pipeline_->GetPlaybackRate());
- pipeline_->SetPlaybackRate(1.0f);
- EXPECT_EQ(1.0f, pipeline_->GetPlaybackRate());
-
- // Setting should still work.
- EXPECT_EQ(1.0f, pipeline_->GetVolume());
- pipeline_->SetVolume(-1.0f);
- EXPECT_EQ(1.0f, pipeline_->GetVolume());
- pipeline_->SetVolume(0.0f);
- EXPECT_EQ(0.0f, pipeline_->GetVolume());
-
- EXPECT_TRUE(kZero == pipeline_->GetMediaTime());
- EXPECT_EQ(0u, pipeline_->GetBufferedTimeRanges().size());
- EXPECT_TRUE(kZero == pipeline_->GetMediaDuration());
-
- EXPECT_EQ(0, pipeline_->GetTotalBytes());
-
- // Should always get set to zero.
- gfx::Size size(1, 1);
- pipeline_->GetNaturalVideoSize(&size);
- EXPECT_EQ(0, size.width());
- EXPECT_EQ(0, size.height());
-}
-
-TEST_F(PipelineTest, NeverInitializes) {
- // Don't execute the callback passed into Initialize().
- EXPECT_CALL(*mocks_->demuxer(), Initialize(_, _));
-
- // This test hangs during initialization by never calling
- // InitializationComplete(). StrictMock<> will ensure that the callback is
- // never executed.
- pipeline_->Start(
- mocks_->Create().Pass(), SetDecryptorReadyCB(),
- base::Bind(&CallbackHelper::OnEnded, base::Unretained(&callbacks_)),
- base::Bind(&CallbackHelper::OnError, base::Unretained(&callbacks_)),
- base::Bind(&CallbackHelper::OnStart, base::Unretained(&callbacks_)),
- base::Bind(&CallbackHelper::OnBufferingState,
- base::Unretained(&callbacks_)),
- base::Closure());
- message_loop_.RunUntilIdle();
-
-
- // Because our callback will get executed when the test tears down, we'll
- // verify that nothing has been called, then set our expectation for the call
- // made during tear down.
- Mock::VerifyAndClear(&callbacks_);
- EXPECT_CALL(callbacks_, OnStart(PIPELINE_OK));
-}
-
-TEST_F(PipelineTest, URLNotFound) {
- EXPECT_CALL(*mocks_->demuxer(), Initialize(_, _))
- .WillOnce(RunCallback<1>(PIPELINE_ERROR_URL_NOT_FOUND));
- EXPECT_CALL(*mocks_->demuxer(), Stop(_))
- .WillOnce(RunClosure<0>());
-
- InitializePipeline(PIPELINE_ERROR_URL_NOT_FOUND);
-}
-
-TEST_F(PipelineTest, NoStreams) {
- EXPECT_CALL(*mocks_->demuxer(), Initialize(_, _))
- .WillOnce(RunCallback<1>(PIPELINE_OK));
- EXPECT_CALL(*mocks_->demuxer(), Stop(_))
- .WillOnce(RunClosure<0>());
-
- InitializePipeline(PIPELINE_ERROR_COULD_NOT_RENDER);
-}
-
-TEST_F(PipelineTest, AudioStream) {
- CreateAudioStream();
- MockDemuxerStreamVector streams;
- streams.push_back(audio_stream());
-
- InitializeDemuxer(&streams);
- InitializeAudioRenderer(audio_stream(), false);
-
- InitializePipeline(PIPELINE_OK);
- EXPECT_TRUE(pipeline_->HasAudio());
- EXPECT_FALSE(pipeline_->HasVideo());
-}
-
-TEST_F(PipelineTest, VideoStream) {
- CreateVideoStream();
- MockDemuxerStreamVector streams;
- streams.push_back(video_stream());
-
- InitializeDemuxer(&streams);
- InitializeVideoRenderer(video_stream());
-
- InitializePipeline(PIPELINE_OK);
- EXPECT_FALSE(pipeline_->HasAudio());
- EXPECT_TRUE(pipeline_->HasVideo());
-}
-
-TEST_F(PipelineTest, AudioVideoStream) {
- CreateAudioStream();
- CreateVideoStream();
- MockDemuxerStreamVector streams;
- streams.push_back(audio_stream());
- streams.push_back(video_stream());
-
- InitializeDemuxer(&streams);
- InitializeAudioRenderer(audio_stream(), false);
- InitializeVideoRenderer(video_stream());
-
- InitializePipeline(PIPELINE_OK);
- EXPECT_TRUE(pipeline_->HasAudio());
- EXPECT_TRUE(pipeline_->HasVideo());
-}
-
-TEST_F(PipelineTest, Seek) {
- CreateAudioStream();
- CreateVideoStream();
- MockDemuxerStreamVector streams;
- streams.push_back(audio_stream());
- streams.push_back(video_stream());
-
- InitializeDemuxer(&streams, base::TimeDelta::FromSeconds(3000));
- InitializeAudioRenderer(audio_stream(), false);
- InitializeVideoRenderer(video_stream());
-
- // Initialize then seek!
- InitializePipeline(PIPELINE_OK);
-
- // Every filter should receive a call to Seek().
- base::TimeDelta expected = base::TimeDelta::FromSeconds(2000);
- ExpectSeek(expected);
- DoSeek(expected);
-}
-
-TEST_F(PipelineTest, SetVolume) {
- CreateAudioStream();
- MockDemuxerStreamVector streams;
- streams.push_back(audio_stream());
-
- InitializeDemuxer(&streams);
- InitializeAudioRenderer(audio_stream(), false);
-
- // The audio renderer should receive a call to SetVolume().
- float expected = 0.5f;
- EXPECT_CALL(*mocks_->audio_renderer(), SetVolume(expected));
-
- // Initialize then set volume!
- InitializePipeline(PIPELINE_OK);
- pipeline_->SetVolume(expected);
-}
-
-TEST_F(PipelineTest, Properties) {
- CreateVideoStream();
- MockDemuxerStreamVector streams;
- streams.push_back(video_stream());
-
- const base::TimeDelta kDuration = base::TimeDelta::FromSeconds(100);
- InitializeDemuxer(&streams, kDuration);
- InitializeVideoRenderer(video_stream());
-
- InitializePipeline(PIPELINE_OK);
- EXPECT_EQ(kDuration.ToInternalValue(),
- pipeline_->GetMediaDuration().ToInternalValue());
- EXPECT_EQ(kTotalBytes, pipeline_->GetTotalBytes());
- EXPECT_FALSE(pipeline_->DidLoadingProgress());
-}
-
-TEST_F(PipelineTest, GetBufferedTimeRanges) {
- CreateVideoStream();
- MockDemuxerStreamVector streams;
- streams.push_back(video_stream());
-
- const base::TimeDelta kDuration = base::TimeDelta::FromSeconds(100);
- InitializeDemuxer(&streams, kDuration);
- InitializeVideoRenderer(video_stream());
-
- InitializePipeline(PIPELINE_OK);
-
- EXPECT_EQ(0u, pipeline_->GetBufferedTimeRanges().size());
-
- EXPECT_FALSE(pipeline_->DidLoadingProgress());
- pipeline_->AddBufferedByteRange(0, kTotalBytes / 8);
- EXPECT_TRUE(pipeline_->DidLoadingProgress());
- EXPECT_FALSE(pipeline_->DidLoadingProgress());
- EXPECT_EQ(1u, pipeline_->GetBufferedTimeRanges().size());
- EXPECT_EQ(base::TimeDelta(), pipeline_->GetBufferedTimeRanges().start(0));
- EXPECT_EQ(kDuration / 8, pipeline_->GetBufferedTimeRanges().end(0));
- pipeline_->AddBufferedTimeRange(base::TimeDelta(), kDuration / 8);
- EXPECT_EQ(base::TimeDelta(), pipeline_->GetBufferedTimeRanges().start(0));
- EXPECT_EQ(kDuration / 8, pipeline_->GetBufferedTimeRanges().end(0));
-
- base::TimeDelta kSeekTime = kDuration / 2;
- ExpectSeek(kSeekTime);
- DoSeek(kSeekTime);
-
- EXPECT_TRUE(pipeline_->DidLoadingProgress());
- EXPECT_FALSE(pipeline_->DidLoadingProgress());
- pipeline_->AddBufferedByteRange(kTotalBytes / 2,
- kTotalBytes / 2 + kTotalBytes / 8);
- EXPECT_TRUE(pipeline_->DidLoadingProgress());
- EXPECT_FALSE(pipeline_->DidLoadingProgress());
- EXPECT_EQ(2u, pipeline_->GetBufferedTimeRanges().size());
- EXPECT_EQ(base::TimeDelta(), pipeline_->GetBufferedTimeRanges().start(0));
- EXPECT_EQ(kDuration / 8, pipeline_->GetBufferedTimeRanges().end(0));
- EXPECT_EQ(kDuration / 2, pipeline_->GetBufferedTimeRanges().start(1));
- EXPECT_EQ(kDuration / 2 + kDuration / 8,
- pipeline_->GetBufferedTimeRanges().end(1));
-
- pipeline_->AddBufferedTimeRange(kDuration / 4, 3 * kDuration / 8);
- EXPECT_EQ(base::TimeDelta(), pipeline_->GetBufferedTimeRanges().start(0));
- EXPECT_EQ(kDuration / 8, pipeline_->GetBufferedTimeRanges().end(0));
- EXPECT_EQ(kDuration / 4, pipeline_->GetBufferedTimeRanges().start(1));
- EXPECT_EQ(3 * kDuration / 8, pipeline_->GetBufferedTimeRanges().end(1));
- EXPECT_EQ(kDuration / 2, pipeline_->GetBufferedTimeRanges().start(2));
- EXPECT_EQ(kDuration / 2 + kDuration / 8,
- pipeline_->GetBufferedTimeRanges().end(2));
-}
-
-TEST_F(PipelineTest, DisableAudioRenderer) {
- CreateAudioStream();
- CreateVideoStream();
- MockDemuxerStreamVector streams;
- streams.push_back(audio_stream());
- streams.push_back(video_stream());
-
- InitializeDemuxer(&streams);
- InitializeAudioRenderer(audio_stream(), false);
- InitializeVideoRenderer(video_stream());
-
- InitializePipeline(PIPELINE_OK);
- EXPECT_TRUE(pipeline_->HasAudio());
- EXPECT_TRUE(pipeline_->HasVideo());
-
- EXPECT_CALL(*mocks_->demuxer(), OnAudioRendererDisabled());
- pipeline_->OnAudioDisabled();
-
- // Verify that ended event is fired when video ends.
- EXPECT_CALL(callbacks_, OnEnded(PIPELINE_OK));
- pipeline_->OnVideoRendererEnded();
-}
-
-TEST_F(PipelineTest, DisableAudioRendererDuringInit) {
- CreateAudioStream();
- CreateVideoStream();
- MockDemuxerStreamVector streams;
- streams.push_back(audio_stream());
- streams.push_back(video_stream());
-
- InitializeDemuxer(&streams);
- InitializeAudioRenderer(audio_stream(), true);
- InitializeVideoRenderer(video_stream());
-
- EXPECT_CALL(*mocks_->demuxer(),
- OnAudioRendererDisabled());
-
- InitializePipeline(PIPELINE_OK);
- EXPECT_FALSE(pipeline_->HasAudio());
- EXPECT_TRUE(pipeline_->HasVideo());
-
- // Verify that ended event is fired when video ends.
- EXPECT_CALL(callbacks_, OnEnded(PIPELINE_OK));
- pipeline_->OnVideoRendererEnded();
-}
-
-TEST_F(PipelineTest, EndedCallback) {
- CreateAudioStream();
- CreateVideoStream();
- MockDemuxerStreamVector streams;
- streams.push_back(audio_stream());
- streams.push_back(video_stream());
-
- InitializeDemuxer(&streams);
- InitializeAudioRenderer(audio_stream(), false);
- InitializeVideoRenderer(video_stream());
- InitializePipeline(PIPELINE_OK);
-
- // The ended callback shouldn't run until both renderers have ended.
- pipeline_->OnAudioRendererEnded();
- message_loop_.RunUntilIdle();
-
- EXPECT_CALL(callbacks_, OnEnded(PIPELINE_OK));
- pipeline_->OnVideoRendererEnded();
- message_loop_.RunUntilIdle();
-}
-
-// Static function & time variable used to simulate changes in wallclock time.
-static int64 g_static_clock_time;
-static base::Time StaticClockFunction() {
- return base::Time::FromInternalValue(g_static_clock_time);
-}
-
-TEST_F(PipelineTest, AudioStreamShorterThanVideo) {
- base::TimeDelta duration = base::TimeDelta::FromSeconds(10);
-
- CreateAudioStream();
- CreateVideoStream();
- MockDemuxerStreamVector streams;
- streams.push_back(audio_stream());
- streams.push_back(video_stream());
-
- // Replace the clock so we can simulate wallclock time advancing w/o using
- // Sleep().
- pipeline_->SetClockForTesting(new Clock(&StaticClockFunction));
-
- InitializeDemuxer(&streams, duration);
- InitializeAudioRenderer(audio_stream(), false);
- InitializeVideoRenderer(video_stream());
- InitializePipeline(PIPELINE_OK);
-
- EXPECT_EQ(0, pipeline_->GetMediaTime().ToInternalValue());
-
- float playback_rate = 1.0f;
- EXPECT_CALL(*mocks_->demuxer(), SetPlaybackRate(playback_rate));
- EXPECT_CALL(*mocks_->video_renderer(), SetPlaybackRate(playback_rate));
- EXPECT_CALL(*mocks_->audio_renderer(), SetPlaybackRate(playback_rate));
- pipeline_->SetPlaybackRate(playback_rate);
- message_loop_.RunUntilIdle();
-
- InSequence s;
-
- // Verify that the clock doesn't advance since it hasn't been started by
- // a time update from the audio stream.
- int64 start_time = pipeline_->GetMediaTime().ToInternalValue();
- g_static_clock_time +=
- base::TimeDelta::FromMilliseconds(100).ToInternalValue();
- EXPECT_EQ(pipeline_->GetMediaTime().ToInternalValue(), start_time);
-
- // Signal end of audio stream.
- pipeline_->OnAudioRendererEnded();
- message_loop_.RunUntilIdle();
-
- // Verify that the clock advances.
- start_time = pipeline_->GetMediaTime().ToInternalValue();
- g_static_clock_time +=
- base::TimeDelta::FromMilliseconds(100).ToInternalValue();
- EXPECT_GT(pipeline_->GetMediaTime().ToInternalValue(), start_time);
-
- // Signal end of video stream and make sure OnEnded() callback occurs.
- EXPECT_CALL(callbacks_, OnEnded(PIPELINE_OK));
- pipeline_->OnVideoRendererEnded();
-}
-
-TEST_F(PipelineTest, ErrorDuringSeek) {
- CreateAudioStream();
- MockDemuxerStreamVector streams;
- streams.push_back(audio_stream());
-
- InitializeDemuxer(&streams);
- InitializeAudioRenderer(audio_stream(), false);
- InitializePipeline(PIPELINE_OK);
-
- float playback_rate = 1.0f;
- EXPECT_CALL(*mocks_->demuxer(), SetPlaybackRate(playback_rate));
- EXPECT_CALL(*mocks_->audio_renderer(), SetPlaybackRate(playback_rate));
- pipeline_->SetPlaybackRate(playback_rate);
- message_loop_.RunUntilIdle();
-
- base::TimeDelta seek_time = base::TimeDelta::FromSeconds(5);
-
- // Preroll() isn't called as the demuxer errors out first.
- EXPECT_CALL(*mocks_->audio_renderer(), Pause(_))
- .WillOnce(RunClosure<0>());
- EXPECT_CALL(*mocks_->audio_renderer(), Flush(_))
- .WillOnce(RunClosure<0>());
- EXPECT_CALL(*mocks_->audio_renderer(), Stop(_))
- .WillOnce(RunClosure<0>());
-
- EXPECT_CALL(*mocks_->demuxer(), Seek(seek_time, _))
- .WillOnce(RunCallback<1>(PIPELINE_ERROR_READ));
- EXPECT_CALL(*mocks_->demuxer(), Stop(_))
- .WillOnce(RunClosure<0>());
-
- pipeline_->Seek(seek_time, base::Bind(&CallbackHelper::OnSeek,
- base::Unretained(&callbacks_)));
- EXPECT_CALL(callbacks_, OnSeek(PIPELINE_ERROR_READ));
- message_loop_.RunUntilIdle();
-}
-
-// Invoked as the OnError callback. This asserts that the pipeline does not
-// enqueue non-teardown-related tasks while tearing down.
-static void TestNoCallsAfterError(PipelineImpl* pipeline,
- MessageLoop* message_loop,
- PipelineStatus /* status */) {
- CHECK(pipeline);
- CHECK(message_loop);
-
- // When we get to this stage, the message loop should be empty.
- message_loop->AssertIdle();
-
- // Make calls on pipeline after error has occurred.
- pipeline->SetPlaybackRate(0.5f);
- pipeline->SetVolume(0.5f);
-
- // No additional tasks should be queued as a result of these calls.
- message_loop->AssertIdle();
-}
-
-TEST_F(PipelineTest, NoMessageDuringTearDownFromError) {
- CreateAudioStream();
- MockDemuxerStreamVector streams;
- streams.push_back(audio_stream());
-
- InitializeDemuxer(&streams);
- InitializeAudioRenderer(audio_stream(), false);
- InitializePipeline(PIPELINE_OK);
-
- // Trigger additional requests on the pipeline during tear down from error.
- base::Callback<void(PipelineStatus)> cb = base::Bind(
- &TestNoCallsAfterError, pipeline_, &message_loop_);
- ON_CALL(callbacks_, OnError(_))
- .WillByDefault(Invoke(&cb, &base::Callback<void(PipelineStatus)>::Run));
-
- base::TimeDelta seek_time = base::TimeDelta::FromSeconds(5);
-
- // Seek() isn't called as the demuxer errors out first.
- EXPECT_CALL(*mocks_->audio_renderer(), Pause(_))
- .WillOnce(RunClosure<0>());
- EXPECT_CALL(*mocks_->audio_renderer(), Flush(_))
- .WillOnce(RunClosure<0>());
- EXPECT_CALL(*mocks_->audio_renderer(), Stop(_))
- .WillOnce(RunClosure<0>());
-
- EXPECT_CALL(*mocks_->demuxer(), Seek(seek_time, _))
- .WillOnce(RunCallback<1>(PIPELINE_ERROR_READ));
- EXPECT_CALL(*mocks_->demuxer(), Stop(_))
- .WillOnce(RunClosure<0>());
-
- pipeline_->Seek(seek_time, base::Bind(&CallbackHelper::OnSeek,
- base::Unretained(&callbacks_)));
- EXPECT_CALL(callbacks_, OnSeek(PIPELINE_ERROR_READ));
- message_loop_.RunUntilIdle();
-}
-
-TEST_F(PipelineTest, StartTimeIsZero) {
- CreateVideoStream();
- MockDemuxerStreamVector streams;
- streams.push_back(video_stream());
-
- const base::TimeDelta kDuration = base::TimeDelta::FromSeconds(100);
- InitializeDemuxer(&streams, kDuration);
- InitializeVideoRenderer(video_stream());
-
- InitializePipeline(PIPELINE_OK);
- EXPECT_FALSE(pipeline_->HasAudio());
- EXPECT_TRUE(pipeline_->HasVideo());
-
- EXPECT_EQ(base::TimeDelta(), pipeline_->GetMediaTime());
-}
-
-TEST_F(PipelineTest, StartTimeIsNonZero) {
- const base::TimeDelta kStartTime = base::TimeDelta::FromSeconds(4);
- const base::TimeDelta kDuration = base::TimeDelta::FromSeconds(100);
-
- EXPECT_CALL(*mocks_->demuxer(), GetStartTime())
- .WillRepeatedly(Return(kStartTime));
-
- CreateVideoStream();
- MockDemuxerStreamVector streams;
- streams.push_back(video_stream());
-
- InitializeDemuxer(&streams, kDuration);
- InitializeVideoRenderer(video_stream());
-
- InitializePipeline(PIPELINE_OK);
- EXPECT_FALSE(pipeline_->HasAudio());
- EXPECT_TRUE(pipeline_->HasVideo());
-
- EXPECT_EQ(kStartTime, pipeline_->GetMediaTime());
-}
-
-static void RunTimeCB(const AudioRenderer::TimeCB& time_cb,
- int time_in_ms,
- int max_time_in_ms) {
- time_cb.Run(base::TimeDelta::FromMilliseconds(time_in_ms),
- base::TimeDelta::FromMilliseconds(max_time_in_ms));
-}
-
-TEST_F(PipelineTest, AudioTimeUpdateDuringSeek) {
- CreateAudioStream();
- MockDemuxerStreamVector streams;
- streams.push_back(audio_stream());
-
- InitializeDemuxer(&streams);
- InitializeAudioRenderer(audio_stream(), false);
- InitializePipeline(PIPELINE_OK);
-
- float playback_rate = 1.0f;
- EXPECT_CALL(*mocks_->demuxer(), SetPlaybackRate(playback_rate));
- EXPECT_CALL(*mocks_->audio_renderer(), SetPlaybackRate(playback_rate));
- pipeline_->SetPlaybackRate(playback_rate);
- message_loop_.RunUntilIdle();
-
- // Provide an initial time update so that the pipeline transitions out of the
- // "waiting for time update" state.
- audio_time_cb_.Run(base::TimeDelta::FromMilliseconds(100),
- base::TimeDelta::FromMilliseconds(500));
-
- base::TimeDelta seek_time = base::TimeDelta::FromSeconds(5);
-
- // Arrange to trigger a time update while the demuxer is in the middle of
- // seeking. This update should be ignored by the pipeline and the clock should
- // not get updated.
- base::Closure closure = base::Bind(&RunTimeCB, audio_time_cb_, 300, 700);
- EXPECT_CALL(*mocks_->demuxer(), Seek(seek_time, _))
- .WillOnce(DoAll(InvokeWithoutArgs(&closure, &base::Closure::Run),
- RunCallback<1>(PIPELINE_OK)));
-
- EXPECT_CALL(*mocks_->audio_renderer(), Pause(_))
- .WillOnce(RunClosure<0>());
- EXPECT_CALL(*mocks_->audio_renderer(), Flush(_))
- .WillOnce(RunClosure<0>());
- EXPECT_CALL(*mocks_->audio_renderer(), Preroll(seek_time, _))
- .WillOnce(RunCallback<1>(PIPELINE_OK));
- EXPECT_CALL(*mocks_->demuxer(), SetPlaybackRate(_));
- EXPECT_CALL(*mocks_->audio_renderer(), SetPlaybackRate(_));
- EXPECT_CALL(*mocks_->audio_renderer(), SetVolume(_));
- EXPECT_CALL(*mocks_->audio_renderer(), Play(_))
- .WillOnce(RunClosure<0>());
-
- EXPECT_CALL(callbacks_, OnBufferingState(PipelineImpl::kPrerollCompleted));
- EXPECT_CALL(callbacks_, OnSeek(PIPELINE_OK));
- DoSeek(seek_time);
-
- EXPECT_EQ(pipeline_->GetMediaTime(), seek_time);
-
- // Now that the seek is complete, verify that time updates advance the current
- // time.
- base::TimeDelta new_time = seek_time + base::TimeDelta::FromMilliseconds(100);
- audio_time_cb_.Run(new_time, new_time);
-
- EXPECT_EQ(pipeline_->GetMediaTime(), new_time);
-}
-
-class FlexibleCallbackRunner : public base::DelegateSimpleThread::Delegate {
- public:
- FlexibleCallbackRunner(base::TimeDelta delay, PipelineStatus status,
- const PipelineStatusCB& status_cb)
- : delay_(delay),
- status_(status),
- status_cb_(status_cb) {
- if (delay_ < base::TimeDelta()) {
- status_cb_.Run(status_);
- return;
- }
- }
- virtual void Run() {
- if (delay_ < base::TimeDelta()) return;
- base::PlatformThread::Sleep(delay_);
- status_cb_.Run(status_);
- }
-
- private:
- base::TimeDelta delay_;
- PipelineStatus status_;
- PipelineStatusCB status_cb_;
-};
-
-void TestPipelineStatusNotification(base::TimeDelta delay) {
- PipelineStatusNotification note;
- // Arbitrary error value we expect to fish out of the notification after the
- // callback is fired.
- const PipelineStatus expected_error = PIPELINE_ERROR_URL_NOT_FOUND;
- FlexibleCallbackRunner runner(delay, expected_error, note.Callback());
- base::DelegateSimpleThread thread(&runner, "FlexibleCallbackRunner");
- thread.Start();
- note.Wait();
- EXPECT_EQ(note.status(), expected_error);
- thread.Join();
-}
-
-// Test that in-line callback (same thread, no yield) works correctly.
-TEST(PipelineStatusNotificationTest, InlineCallback) {
- TestPipelineStatusNotification(base::TimeDelta::FromMilliseconds(-1));
-}
-
-// Test that different-thread, no-delay callback works correctly.
-TEST(PipelineStatusNotificationTest, ImmediateCallback) {
- TestPipelineStatusNotification(base::TimeDelta::FromMilliseconds(0));
-}
-
-// Test that different-thread, some-delay callback (the expected common case)
-// works correctly.
-TEST(PipelineStatusNotificationTest, DelayedCallback) {
- TestPipelineStatusNotification(base::TimeDelta::FromMilliseconds(20));
-}
-
-class PipelineTeardownTest : public PipelineTest {
- public:
- enum TeardownState {
- kInitDemuxer,
- kInitAudioRenderer,
- kInitVideoRenderer,
- kPausing,
- kFlushing,
- kSeeking,
- kPrerolling,
- kStarting,
- kPlaying,
- };
-
- enum StopOrError {
- kStop,
- kError,
- };
-
- PipelineTeardownTest() {}
- virtual ~PipelineTeardownTest() {}
-
- void RunTest(TeardownState state, StopOrError stop_or_error) {
- switch (state) {
- case kInitDemuxer:
- case kInitAudioRenderer:
- case kInitVideoRenderer:
- DoInitialize(state, stop_or_error);
- break;
-
- case kPausing:
- case kFlushing:
- case kSeeking:
- case kPrerolling:
- case kStarting:
- DoInitialize(state, stop_or_error);
- DoSeek(state, stop_or_error);
- break;
-
- case kPlaying:
- DoInitialize(state, stop_or_error);
- DoStopOrError(stop_or_error);
- break;
- }
- }
-
- private:
- // TODO(scherkus): We do radically different things whether teardown is
- // invoked via stop vs error. The teardown path should be the same,
- // see http://crbug.com/110228
- void DoInitialize(TeardownState state, StopOrError stop_or_error) {
- PipelineStatus expected_status =
- SetInitializeExpectations(state, stop_or_error);
-
- EXPECT_CALL(callbacks_, OnStart(expected_status));
- pipeline_->Start(
- mocks_->Create().Pass(), SetDecryptorReadyCB(),
- base::Bind(&CallbackHelper::OnEnded, base::Unretained(&callbacks_)),
- base::Bind(&CallbackHelper::OnError, base::Unretained(&callbacks_)),
- base::Bind(&CallbackHelper::OnStart, base::Unretained(&callbacks_)),
- base::Bind(&CallbackHelper::OnBufferingState,
- base::Unretained(&callbacks_)),
- base::Closure());
- message_loop_.RunUntilIdle();
- }
-
- PipelineStatus SetInitializeExpectations(TeardownState state,
- StopOrError stop_or_error) {
- PipelineStatus status = PIPELINE_OK;
- base::Closure stop_cb = base::Bind(
- &CallbackHelper::OnStop, base::Unretained(&callbacks_));
-
- if (state == kInitDemuxer) {
- if (stop_or_error == kStop) {
- EXPECT_CALL(*mocks_->demuxer(), Initialize(_, _))
- .WillOnce(DoAll(Stop(pipeline_, stop_cb),
- RunCallback<1>(PIPELINE_OK)));
- EXPECT_CALL(callbacks_, OnStop());
- } else {
- status = DEMUXER_ERROR_COULD_NOT_OPEN;
- EXPECT_CALL(*mocks_->demuxer(), Initialize(_, _))
- .WillOnce(RunCallback<1>(status));
- }
-
- EXPECT_CALL(*mocks_->demuxer(), Stop(_)).WillOnce(RunClosure<0>());
- return status;
- }
-
- CreateAudioStream();
- CreateVideoStream();
- MockDemuxerStreamVector streams;
- streams.push_back(audio_stream());
- streams.push_back(video_stream());
- InitializeDemuxer(&streams, base::TimeDelta::FromSeconds(3000));
-
- if (state == kInitAudioRenderer) {
- if (stop_or_error == kStop) {
- EXPECT_CALL(*mocks_->audio_renderer(),
- Initialize(_, _, _, _, _, _, _, _, _))
- .WillOnce(DoAll(Stop(pipeline_, stop_cb),
- RunCallback<2>(PIPELINE_OK)));
- EXPECT_CALL(callbacks_, OnStop());
- } else {
- status = PIPELINE_ERROR_INITIALIZATION_FAILED;
- EXPECT_CALL(*mocks_->audio_renderer(),
- Initialize(_, _, _, _, _, _, _, _, _))
- .WillOnce(RunCallback<2>(status));
- }
-
- EXPECT_CALL(*mocks_->demuxer(), Stop(_)).WillOnce(RunClosure<0>());
- EXPECT_CALL(*mocks_->audio_renderer(), Stop(_)).WillOnce(RunClosure<0>());
- return status;
- }
-
- EXPECT_CALL(*mocks_->audio_renderer(),
- Initialize(_, _, _, _, _, _, _, _, _))
- .WillOnce(RunCallback<2>(PIPELINE_OK));
-
- if (state == kInitVideoRenderer) {
- if (stop_or_error == kStop) {
- EXPECT_CALL(*mocks_->video_renderer(),
- Initialize(_, _, _, _, _, _, _, _, _, _))
- .WillOnce(DoAll(Stop(pipeline_, stop_cb),
- RunCallback<2>(PIPELINE_OK)));
- EXPECT_CALL(callbacks_, OnStop());
- } else {
- status = PIPELINE_ERROR_INITIALIZATION_FAILED;
- EXPECT_CALL(*mocks_->video_renderer(),
- Initialize(_, _, _, _, _, _, _, _, _, _))
- .WillOnce(RunCallback<2>(status));
- }
-
- EXPECT_CALL(*mocks_->demuxer(), Stop(_)).WillOnce(RunClosure<0>());
- EXPECT_CALL(*mocks_->audio_renderer(), Stop(_)).WillOnce(RunClosure<0>());
- EXPECT_CALL(*mocks_->video_renderer(), Stop(_)).WillOnce(RunClosure<0>());
- return status;
- }
-
- EXPECT_CALL(*mocks_->video_renderer(),
- Initialize(_, _, _, _, _, _, _, _, _, _))
- .WillOnce(RunCallback<2>(PIPELINE_OK));
-
- EXPECT_CALL(callbacks_, OnBufferingState(PipelineImpl::kHaveMetadata));
-
- // If we get here it's a successful initialization.
- EXPECT_CALL(*mocks_->audio_renderer(), Preroll(base::TimeDelta(), _))
- .WillOnce(RunCallback<1>(PIPELINE_OK));
- EXPECT_CALL(*mocks_->video_renderer(), Preroll(base::TimeDelta(), _))
- .WillOnce(RunCallback<1>(PIPELINE_OK));
-
- EXPECT_CALL(*mocks_->demuxer(), SetPlaybackRate(0.0f));
- EXPECT_CALL(*mocks_->audio_renderer(), SetPlaybackRate(0.0f));
- EXPECT_CALL(*mocks_->video_renderer(), SetPlaybackRate(0.0f));
- EXPECT_CALL(*mocks_->audio_renderer(), SetVolume(1.0f));
-
- EXPECT_CALL(*mocks_->audio_renderer(), Play(_))
- .WillOnce(RunClosure<0>());
- EXPECT_CALL(*mocks_->video_renderer(), Play(_))
- .WillOnce(RunClosure<0>());
-
- if (status == PIPELINE_OK)
- EXPECT_CALL(callbacks_,
- OnBufferingState(PipelineImpl::kPrerollCompleted));
-
- return status;
- }
-
- void DoSeek(TeardownState state, StopOrError stop_or_error) {
- InSequence s;
- PipelineStatus status = SetSeekExpectations(state, stop_or_error);
-
- EXPECT_CALL(*mocks_->demuxer(), Stop(_)).WillOnce(RunClosure<0>());
- EXPECT_CALL(*mocks_->audio_renderer(), Stop(_)).WillOnce(RunClosure<0>());
- EXPECT_CALL(*mocks_->video_renderer(), Stop(_)).WillOnce(RunClosure<0>());
- EXPECT_CALL(callbacks_, OnSeek(status));
-
- if (status == PIPELINE_OK) {
- EXPECT_CALL(callbacks_, OnStop());
- }
-
- pipeline_->Seek(base::TimeDelta::FromSeconds(10), base::Bind(
- &CallbackHelper::OnSeek, base::Unretained(&callbacks_)));
- message_loop_.RunUntilIdle();
- }
-
- PipelineStatus SetSeekExpectations(TeardownState state,
- StopOrError stop_or_error) {
- PipelineStatus status = PIPELINE_OK;
- base::Closure stop_cb = base::Bind(
- &CallbackHelper::OnStop, base::Unretained(&callbacks_));
-
- if (state == kPausing) {
- if (stop_or_error == kStop) {
- EXPECT_CALL(*mocks_->audio_renderer(), Pause(_))
- .WillOnce(DoAll(Stop(pipeline_, stop_cb), RunClosure<0>()));
- } else {
- status = PIPELINE_ERROR_READ;
- EXPECT_CALL(*mocks_->audio_renderer(), Pause(_))
- .WillOnce(DoAll(SetError(pipeline_, status), RunClosure<0>()));
- }
-
- return status;
- }
-
- EXPECT_CALL(*mocks_->audio_renderer(), Pause(_)).WillOnce(RunClosure<0>());
- EXPECT_CALL(*mocks_->video_renderer(), Pause(_)).WillOnce(RunClosure<0>());
-
- if (state == kFlushing) {
- if (stop_or_error == kStop) {
- EXPECT_CALL(*mocks_->audio_renderer(), Flush(_))
- .WillOnce(DoAll(Stop(pipeline_, stop_cb), RunClosure<0>()));
- } else {
- status = PIPELINE_ERROR_READ;
- EXPECT_CALL(*mocks_->audio_renderer(), Flush(_))
- .WillOnce(DoAll(SetError(pipeline_, status), RunClosure<0>()));
- }
-
- return status;
- }
-
- EXPECT_CALL(*mocks_->audio_renderer(), Flush(_)).WillOnce(RunClosure<0>());
- EXPECT_CALL(*mocks_->video_renderer(), Flush(_)).WillOnce(RunClosure<0>());
-
- if (state == kSeeking) {
- if (stop_or_error == kStop) {
- EXPECT_CALL(*mocks_->demuxer(), Seek(_, _))
- .WillOnce(DoAll(Stop(pipeline_, stop_cb),
- RunCallback<1>(PIPELINE_OK)));
- } else {
- status = PIPELINE_ERROR_READ;
- EXPECT_CALL(*mocks_->demuxer(), Seek(_, _))
- .WillOnce(RunCallback<1>(status));
- }
-
- return status;
- }
-
- EXPECT_CALL(*mocks_->demuxer(), Seek(_, _))
- .WillOnce(RunCallback<1>(PIPELINE_OK));
-
- if (state == kPrerolling) {
- if (stop_or_error == kStop) {
- EXPECT_CALL(*mocks_->audio_renderer(), Preroll(_, _))
- .WillOnce(DoAll(Stop(pipeline_, stop_cb),
- RunCallback<1>(PIPELINE_OK)));
- } else {
- status = PIPELINE_ERROR_READ;
- EXPECT_CALL(*mocks_->audio_renderer(), Preroll(_, _))
- .WillOnce(RunCallback<1>(status));
- }
-
- return status;
- }
-
- EXPECT_CALL(*mocks_->audio_renderer(), Preroll(_, _))
- .WillOnce(RunCallback<1>(PIPELINE_OK));
- EXPECT_CALL(*mocks_->video_renderer(), Preroll(_, _))
- .WillOnce(RunCallback<1>(PIPELINE_OK));
-
- // Playback rate and volume are updated prior to starting.
- EXPECT_CALL(*mocks_->demuxer(), SetPlaybackRate(0.0f));
- EXPECT_CALL(*mocks_->audio_renderer(), SetPlaybackRate(0.0f));
- EXPECT_CALL(*mocks_->video_renderer(), SetPlaybackRate(0.0f));
- EXPECT_CALL(*mocks_->audio_renderer(), SetVolume(1.0f));
-
- if (state == kStarting) {
- if (stop_or_error == kStop) {
- EXPECT_CALL(*mocks_->audio_renderer(), Play(_))
- .WillOnce(DoAll(Stop(pipeline_, stop_cb), RunClosure<0>()));
- } else {
- status = PIPELINE_ERROR_READ;
- EXPECT_CALL(*mocks_->audio_renderer(), Play(_))
- .WillOnce(DoAll(SetError(pipeline_, status), RunClosure<0>()));
- }
- return status;
- }
-
- NOTREACHED() << "State not supported: " << state;
- return status;
- }
-
- void DoStopOrError(StopOrError stop_or_error) {
- InSequence s;
-
- EXPECT_CALL(*mocks_->demuxer(), Stop(_)).WillOnce(RunClosure<0>());
- EXPECT_CALL(*mocks_->audio_renderer(), Stop(_)).WillOnce(RunClosure<0>());
- EXPECT_CALL(*mocks_->video_renderer(), Stop(_)).WillOnce(RunClosure<0>());
-
- if (stop_or_error == kStop) {
- EXPECT_CALL(callbacks_, OnStop());
- pipeline_->Stop(base::Bind(
- &CallbackHelper::OnStop, base::Unretained(&callbacks_)));
- } else {
- EXPECT_CALL(callbacks_, OnError(PIPELINE_ERROR_READ));
- pipeline_->SetErrorForTesting(PIPELINE_ERROR_READ);
- }
-
- message_loop_.RunUntilIdle();
- }
-
- DISALLOW_COPY_AND_ASSIGN(PipelineTeardownTest);
-};
-
-#define INSTANTIATE_TEARDOWN_TEST(stop_or_error, state) \
- TEST_F(PipelineTeardownTest, stop_or_error##_##state) { \
- RunTest(k##state, k##stop_or_error); \
- }
-
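-// For reference, a sketch of what one instantiation expands to (illustrative,
-// not the exact preprocessor output):
-//
-//   TEST_F(PipelineTeardownTest, Stop_Seeking) { RunTest(kSeeking, kStop); }
-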
-INSTANTIATE_TEARDOWN_TEST(Stop, InitDemuxer);
-INSTANTIATE_TEARDOWN_TEST(Stop, InitAudioRenderer);
-INSTANTIATE_TEARDOWN_TEST(Stop, InitVideoRenderer);
-INSTANTIATE_TEARDOWN_TEST(Stop, Pausing);
-INSTANTIATE_TEARDOWN_TEST(Stop, Flushing);
-INSTANTIATE_TEARDOWN_TEST(Stop, Seeking);
-INSTANTIATE_TEARDOWN_TEST(Stop, Prerolling);
-INSTANTIATE_TEARDOWN_TEST(Stop, Starting);
-INSTANTIATE_TEARDOWN_TEST(Stop, Playing);
-
-INSTANTIATE_TEARDOWN_TEST(Error, InitDemuxer);
-INSTANTIATE_TEARDOWN_TEST(Error, InitAudioRenderer);
-INSTANTIATE_TEARDOWN_TEST(Error, InitVideoRenderer);
-INSTANTIATE_TEARDOWN_TEST(Error, Pausing);
-INSTANTIATE_TEARDOWN_TEST(Error, Flushing);
-INSTANTIATE_TEARDOWN_TEST(Error, Seeking);
-INSTANTIATE_TEARDOWN_TEST(Error, Prerolling);
-INSTANTIATE_TEARDOWN_TEST(Error, Starting);
-INSTANTIATE_TEARDOWN_TEST(Error, Playing);
-
-} // namespace media
diff --git a/src/media/base/pipeline_status.cc b/src/media/base/pipeline_status.cc
deleted file mode 100644
index 6c08383..0000000
--- a/src/media/base/pipeline_status.cc
+++ /dev/null
@@ -1,24 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "media/base/pipeline_status.h"
-
-#include "base/bind.h"
-#include "base/metrics/histogram.h"
-
-namespace media {
-
-static void ReportAndRun(const std::string& name,
- const PipelineStatusCB& cb,
- PipelineStatus status) {
- UMA_HISTOGRAM_ENUMERATION(name, status, PIPELINE_STATUS_MAX);
- cb.Run(status);
-}
-
-PipelineStatusCB CreateUMAReportingPipelineCB(const std::string& name,
- const PipelineStatusCB& cb) {
- return base::Bind(&ReportAndRun, name, cb);
-}
-
-} // namespace media
diff --git a/src/media/base/pipeline_status.h b/src/media/base/pipeline_status.h
deleted file mode 100644
index c208d01..0000000
--- a/src/media/base/pipeline_status.h
+++ /dev/null
@@ -1,65 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef MEDIA_BASE_PIPELINE_STATUS_H_
-#define MEDIA_BASE_PIPELINE_STATUS_H_
-
-#include "base/callback.h"
-
-#include <string>
-
-namespace media {
-
-// Status states for pipeline. All codes except PIPELINE_OK indicate errors.
-// Logged to UMA, so never reuse a value, always add new/greater ones!
-// TODO(vrk/scherkus): Trim the unused status codes. (crbug.com/126070)
-enum PipelineStatus {
- PIPELINE_OK = 0,
- PIPELINE_ERROR_URL_NOT_FOUND = 1,
- PIPELINE_ERROR_NETWORK = 2,
- PIPELINE_ERROR_DECODE = 3,
- PIPELINE_ERROR_DECRYPT = 4,
- PIPELINE_ERROR_ABORT = 5,
- PIPELINE_ERROR_INITIALIZATION_FAILED = 6,
- PIPELINE_ERROR_COULD_NOT_RENDER = 8,
- PIPELINE_ERROR_READ = 9,
- PIPELINE_ERROR_OPERATION_PENDING = 10,
- PIPELINE_ERROR_INVALID_STATE = 11,
- // Demuxer related errors.
- DEMUXER_ERROR_COULD_NOT_OPEN = 12,
- DEMUXER_ERROR_COULD_NOT_PARSE = 13,
- DEMUXER_ERROR_NO_SUPPORTED_STREAMS = 14,
- // Decoder related errors.
- DECODER_ERROR_NOT_SUPPORTED = 15,
- PIPELINE_STATUS_MAX, // Must be greater than all other values logged.
-};
-
-typedef base::Callback<void(PipelineStatus)> PipelineStatusCB;
-
-// Wrap & return a callback around |cb| which reports its argument to UMA under
-// the requested |name|.
-PipelineStatusCB CreateUMAReportingPipelineCB(const std::string& name,
- const PipelineStatusCB& cb);
-
-// TODO(scherkus): this should be moved alongside host interface definitions.
-struct PipelineStatistics {
- PipelineStatistics()
- : audio_bytes_decoded(0),
- video_bytes_decoded(0),
- video_frames_decoded(0),
- video_frames_dropped(0) {
- }
-
- uint32 audio_bytes_decoded; // Should be uint64?
- uint32 video_bytes_decoded; // Should be uint64?
- uint32 video_frames_decoded;
- uint32 video_frames_dropped;
-};
-
-// Used for updating pipeline statistics.
-typedef base::Callback<void(const PipelineStatistics&)> StatisticsCB;
-
-} // namespace media
-
-#endif // MEDIA_BASE_PIPELINE_STATUS_H_
diff --git a/src/media/base/ranges.cc b/src/media/base/ranges.cc
deleted file mode 100644
index b7b2b55..0000000
--- a/src/media/base/ranges.cc
+++ /dev/null
@@ -1,15 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "media/base/ranges.h"
-
-namespace media {
-
-template<>
-void Ranges<base::TimeDelta>::DCheckLT(const base::TimeDelta& lhs,
- const base::TimeDelta& rhs) const {
- DCHECK(lhs < rhs) << lhs.ToInternalValue() << " < " << rhs.ToInternalValue();
-}
-
-} // namespace media
diff --git a/src/media/base/ranges.h b/src/media/base/ranges.h
deleted file mode 100644
index 5a1df66..0000000
--- a/src/media/base/ranges.h
+++ /dev/null
@@ -1,161 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef MEDIA_BASE_RANGES_H_
-#define MEDIA_BASE_RANGES_H_
-
-#include <algorithm>
-#include <ostream>
-#include <vector>
-
-#include "base/basictypes.h"
-#include "base/logging.h"
-#include "base/time.h"
-#include "media/base/media_export.h"
-
-namespace media {
-
-// Ranges allows holding an ordered list of ranges of [start,end) intervals.
-// The canonical example use-case is holding the list of ranges of buffered
-// bytes or times in a <video> tag.
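-//
-// A brief usage sketch (illustrative only):
-//
-//   media::Ranges<int> buffered;
-//   buffered.Add(0, 10);
-//   buffered.Add(20, 30);
-//   buffered.Add(10, 20);     // Coalesces everything into a single [0,30).
-//   size_t count = buffered.size();  // count == 1.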
-template<class T> // Endpoint type; typically a base::TimeDelta or an int64.
-class Ranges {
- public:
- // Allow copy & assign.
-
- // Add (start,end) to this object, coalescing overlaps as appropriate.
- // Returns the number of stored ranges, post coalescing.
- size_t Add(T start, T end);
-
- // Return the number of disjoint ranges.
- size_t size() const;
-
- // Return the "i"'th range's start & end (0-based).
- T start(int i) const;
- T end(int i) const;
-
- // Clear all ranges.
- void clear();
-
- // Computes the intersection between this range and |other|.
- Ranges<T> IntersectionWith(const Ranges<T>& other) const;
-
- private:
- // Wrapper around DCHECK_LT allowing comparisons of operator<<'able T's.
- void DCheckLT(const T& lhs, const T& rhs) const;
-
- // Disjoint, in increasing order of start.
- std::vector<std::pair<T, T> > ranges_;
-};
-
-//////////////////////////////////////////////////////////////////////
-// EVERYTHING BELOW HERE IS IMPLEMENTATION DETAIL!!
-//////////////////////////////////////////////////////////////////////
-
-template<class T>
-size_t Ranges<T>::Add(T start, T end) {
- if (start == end) // Nothing to be done with empty ranges.
- return ranges_.size();
-
- DCheckLT(start, end);
- size_t i;
- // Walk along the array of ranges until |start| is no longer larger than the
- // current interval's end.
- for (i = 0; i < ranges_.size() && ranges_[i].second < start; ++i) {
- // Empty body
- }
-
- // Now we know |start| belongs in the i'th slot.
- // If i is the end of the range, append new range and done.
- if (i == ranges_.size()) {
- ranges_.push_back(std::make_pair(start, end));
- return ranges_.size();
- }
-
- // If |end| is less than i->first, then [start,end) is a new (non-overlapping)
- // i'th entry pushing everyone else back, and done.
- if (end < ranges_[i].first) {
- ranges_.insert(ranges_.begin() + i, std::make_pair(start, end));
- return ranges_.size();
- }
-
- // Easy cases done. Getting here means there is overlap between [start,end)
- // and the existing ranges.
-
- // Now: start <= i->second && i->first <= end
- if (start < ranges_[i].first)
- ranges_[i].first = start;
- if (ranges_[i].second < end)
- ranges_[i].second = end;
-
- // Now: [start,end) is contained in the i'th range, and we'd be done, except
- // for the fact that the newly-extended i'th range might now overlap
- // subsequent ranges. Merge until discontinuities appear. Note that there's
- // no need to test/merge previous ranges, since needing that would mean the
- // original loop went too far.
- while ((i + 1) < ranges_.size() &&
- ranges_[i + 1].first <= ranges_[i].second) {
- ranges_[i].second = std::max(ranges_[i].second, ranges_[i + 1].second);
- ranges_.erase(ranges_.begin() + i + 1);
- }
-
- return ranges_.size();
-}
-
-template<>
-void Ranges<base::TimeDelta>::DCheckLT(const base::TimeDelta& lhs,
- const base::TimeDelta& rhs) const;
-
-template<class T>
-void Ranges<T>::DCheckLT(const T& lhs, const T& rhs) const {
- DCHECK_LT(lhs, rhs);
-}
-
-template<class T>
-size_t Ranges<T>::size() const {
- return ranges_.size();
-}
-
-template<class T>
-T Ranges<T>::start(int i) const {
- return ranges_[static_cast<size_t>(i)].first;
-}
-
-template<class T>
-T Ranges<T>::end(int i) const {
- return ranges_[static_cast<size_t>(i)].second;
-}
-
-template<class T>
-void Ranges<T>::clear() {
- ranges_.clear();
-}
-
-template<class T>
-Ranges<T> Ranges<T>::IntersectionWith(const Ranges<T>& other) const {
- Ranges<T> result;
-
- size_t i = 0;
- size_t j = 0;
-
- while (i < size() && j < other.size()) {
- T max_start = std::max(start(i), other.start(j));
- T min_end = std::min(end(i), other.end(j));
-
- // Add an intersection range to the result if the ranges overlap.
- if (max_start < min_end)
- result.Add(max_start, min_end);
-
- if (end(i) < other.end(j))
- ++i;
- else
- ++j;
- }
-
- return result;
-}
-
-} // namespace media
-
-#endif // MEDIA_BASE_RANGES_H_
diff --git a/src/media/base/ranges_unittest.cc b/src/media/base/ranges_unittest.cc
deleted file mode 100644
index 967d138..0000000
--- a/src/media/base/ranges_unittest.cc
+++ /dev/null
@@ -1,151 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include <sstream>
-
-#include "media/base/ranges.h"
-
-#include "base/string_piece.h"
-#include "testing/gtest/include/gtest/gtest.h"
-
-namespace media {
-
-// Human-readable output operator, for debugging/testability.
-template<class T>
-std::ostream& operator<<(std::ostream& os, const Ranges<T>& r) {
- os << "{ ";
- for (size_t i = 0; i < r.size(); ++i)
- os << "[" << r.start(i) << "," << r.end(i) << ") ";
- os << "}";
- return os;
-}
-
-// Helper method for asserting stringified form of |r| matches expectation.
-template<class T>
-static void ExpectRanges(const Ranges<T>& r,
- const base::StringPiece& expected_string) {
- std::stringstream ss;
- ss << r;
- ASSERT_EQ(ss.str(), expected_string);
-}
-
-#define ASSERT_RANGES(ranges, expectation) \
- ASSERT_NO_FATAL_FAILURE(ExpectRanges(ranges, expectation));
-
-TEST(RangesTest, SimpleTests) {
- Ranges<int> r;
- ASSERT_EQ(r.size(), 0u) << r;
- ASSERT_EQ(r.Add(0, 1), 1u) << r;
- ASSERT_EQ(r.size(), 1u) << r;
- ASSERT_RANGES(r, "{ [0,1) }");
- ASSERT_EQ(r.Add(2, 3), 2u) << r;
- ASSERT_RANGES(r, "{ [0,1) [2,3) }");
- ASSERT_EQ(r.Add(1, 2), 1u) << r;
- ASSERT_RANGES(r, "{ [0,3) }");
- ASSERT_EQ(r.Add(1, 4), 1u) << r;
- ASSERT_RANGES(r, "{ [0,4) }");
- ASSERT_EQ(r.Add(7, 9), 2u) << r;
- ASSERT_EQ(r.Add(5, 6), 3u) << r;
- ASSERT_RANGES(r, "{ [0,4) [5,6) [7,9) }");
- ASSERT_EQ(r.Add(6, 7), 2u) << r;
- ASSERT_RANGES(r, "{ [0,4) [5,9) }");
-}
-
-TEST(RangesTest, ExtendRange) {
- Ranges<double> r;
- ASSERT_EQ(r.Add(0, 1), 1u) << r;
- ASSERT_EQ(r.Add(0.5, 1.5), 1u) << r;
- ASSERT_RANGES(r, "{ [0,1.5) }");
-
- r.clear();
- ASSERT_EQ(r.Add(0, 1), 1u) << r;
- ASSERT_EQ(r.Add(-0.5, 0.5), 1u) << r;
- ASSERT_RANGES(r, "{ [-0.5,1) }");
-
- r.clear();
- ASSERT_EQ(r.Add(0, 1), 1u) << r;
- ASSERT_EQ(r.Add(2, 3), 2u) << r;
- ASSERT_EQ(r.Add(4, 5), 3u) << r;
- ASSERT_EQ(r.Add(0.5, 1.5), 3u) << r;
- ASSERT_RANGES(r, "{ [0,1.5) [2,3) [4,5) }");
-
- r.clear();
- ASSERT_EQ(r.Add(0, 1), 1u) << r;
- ASSERT_EQ(r.Add(2, 3), 2u) << r;
- ASSERT_EQ(r.Add(4, 5), 3u) << r;
- ASSERT_EQ(r.Add(1.5, 2.5), 3u) << r;
- ASSERT_RANGES(r, "{ [0,1) [1.5,3) [4,5) }");
-}
-
-TEST(RangesTest, CoalesceRanges) {
- Ranges<double> r;
- ASSERT_EQ(r.Add(0, 1), 1u) << r;
- ASSERT_EQ(r.Add(2, 3), 2u) << r;
- ASSERT_EQ(r.Add(4, 5), 3u) << r;
- ASSERT_EQ(r.Add(0.5, 2.5), 2u) << r;
- ASSERT_RANGES(r, "{ [0,3) [4,5) }");
-
- r.clear();
- ASSERT_EQ(r.Add(0, 1), 1u) << r;
- ASSERT_EQ(r.Add(2, 3), 2u) << r;
- ASSERT_EQ(r.Add(4, 5), 3u) << r;
- ASSERT_EQ(r.Add(0.5, 4.5), 1u) << r;
- ASSERT_RANGES(r, "{ [0,5) }");
-
- r.clear();
- ASSERT_EQ(r.Add(0, 1), 1u) << r;
- ASSERT_EQ(r.Add(1, 2), 1u) << r;
- ASSERT_RANGES(r, "{ [0,2) }");
-}
-
-TEST(RangesTest, IntersectionWith) {
- Ranges<int> a;
- Ranges<int> b;
-
- ASSERT_EQ(a.Add(0, 1), 1u) << a;
- ASSERT_EQ(a.Add(4, 7), 2u) << a;
- ASSERT_EQ(a.Add(10, 12), 3u) << a;
-
- // Test intersections with an empty range.
- ASSERT_RANGES(a, "{ [0,1) [4,7) [10,12) }");
- ASSERT_RANGES(b, "{ }");
- ASSERT_RANGES(a.IntersectionWith(b), "{ }");
- ASSERT_RANGES(b.IntersectionWith(a), "{ }");
-
- // Test intersections with a completely overlapping range.
- ASSERT_EQ(b.Add(-1, 13), 1u) << b;
- ASSERT_RANGES(a, "{ [0,1) [4,7) [10,12) }");
- ASSERT_RANGES(b, "{ [-1,13) }");
- ASSERT_RANGES(a.IntersectionWith(b), "{ [0,1) [4,7) [10,12) }");
- ASSERT_RANGES(b.IntersectionWith(a), "{ [0,1) [4,7) [10,12) }");
-
- // Test intersections with disjoint ranges.
- b.clear();
- ASSERT_EQ(b.Add(1, 4), 1u) << b;
- ASSERT_EQ(b.Add(8, 9), 2u) << b;
- ASSERT_RANGES(a, "{ [0,1) [4,7) [10,12) }");
- ASSERT_RANGES(b, "{ [1,4) [8,9) }");
- ASSERT_RANGES(a.IntersectionWith(b), "{ }");
- ASSERT_RANGES(b.IntersectionWith(a), "{ }");
-
- // Test intersections with partially overlapping ranges.
- b.clear();
- ASSERT_EQ(b.Add(0, 3), 1u) << b;
- ASSERT_EQ(b.Add(5, 11), 2u) << b;
- ASSERT_RANGES(a, "{ [0,1) [4,7) [10,12) }");
- ASSERT_RANGES(b, "{ [0,3) [5,11) }");
- ASSERT_RANGES(a.IntersectionWith(b), "{ [0,1) [5,7) [10,11) }");
- ASSERT_RANGES(b.IntersectionWith(a), "{ [0,1) [5,7) [10,11) }");
-
- // Test intersection with a range that starts at the beginning of the
- // first range and ends at the end of the last range.
- b.clear();
- ASSERT_EQ(b.Add(0, 12), 1u) << b;
- ASSERT_RANGES(a, "{ [0,1) [4,7) [10,12) }");
- ASSERT_RANGES(b, "{ [0,12) }");
- ASSERT_RANGES(a.IntersectionWith(b), "{ [0,1) [4,7) [10,12) }");
- ASSERT_RANGES(b.IntersectionWith(a), "{ [0,1) [4,7) [10,12) }");
-}
-
-} // namespace media
diff --git a/src/media/base/run_all_unittests.cc b/src/media/base/run_all_unittests.cc
deleted file mode 100644
index 1942124..0000000
--- a/src/media/base/run_all_unittests.cc
+++ /dev/null
@@ -1,30 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/test/main_hook.h"
-#include "base/test/test_suite.h"
-#include "media/base/media.h"
-
-class TestSuiteNoAtExit : public base::TestSuite {
- public:
- TestSuiteNoAtExit(int argc, char** argv) : TestSuite(argc, argv) {}
- virtual ~TestSuiteNoAtExit() {}
- protected:
- virtual void Initialize();
-};
-
-void TestSuiteNoAtExit::Initialize() {
- // Run TestSuite::Initialize first so that logging is initialized.
- base::TestSuite::Initialize();
-#if !defined(__LB_SHELL__) && !defined(COBALT)
- // Run this here instead of main() to ensure an AtExitManager is already
- // present.
- media::InitializeMediaLibraryForTesting();
-#endif
-}
-
-int main(int argc, char** argv) {
- MainHook hook(main, argc, argv);
- return TestSuiteNoAtExit(argc, argv).Run();
-}
diff --git a/src/media/base/sample_format.cc b/src/media/base/sample_format.cc
deleted file mode 100644
index f827d69..0000000
--- a/src/media/base/sample_format.cc
+++ /dev/null
@@ -1,52 +0,0 @@
-// Copyright 2013 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "media/base/sample_format.h"
-
-#include "base/logging.h"
-
-namespace media {
-
-int SampleFormatToBytesPerChannel(SampleFormat sample_format) {
- switch (sample_format) {
- case kUnknownSampleFormat:
- return 0;
- case kSampleFormatU8:
- return 1;
- case kSampleFormatS16:
- case kSampleFormatPlanarS16:
- return 2;
- case kSampleFormatS32:
- case kSampleFormatF32:
- case kSampleFormatPlanarF32:
- return 4;
- }
-
- NOTREACHED() << "Invalid sample format provided: " << sample_format;
- return 0;
-}
-
-const char* SampleFormatToString(SampleFormat sample_format) {
- switch (sample_format) {
- case kUnknownSampleFormat:
- return "Unknown sample format";
- case kSampleFormatU8:
- return "Unsigned 8-bit with bias of 128";
- case kSampleFormatS16:
- return "Signed 16-bit";
- case kSampleFormatS32:
- return "Signed 32-bit";
- case kSampleFormatF32:
- return "Float 32-bit";
- case kSampleFormatPlanarS16:
- return "Signed 16-bit planar";
- case kSampleFormatPlanarF32:
- return "Float 32-bit planar";
- }
- NOTREACHED() << "Invalid sample format provided: " << sample_format;
- return "";
-}
-
-} // namespace media
-
diff --git a/src/media/base/sample_format.h b/src/media/base/sample_format.h
deleted file mode 100644
index 806bc8e..0000000
--- a/src/media/base/sample_format.h
+++ /dev/null
@@ -1,39 +0,0 @@
-// Copyright 2013 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef MEDIA_BASE_SAMPLE_FORMAT_H
-#define MEDIA_BASE_SAMPLE_FORMAT_H
-
-#include "media/base/media_export.h"
-
-namespace media {
-
-enum SampleFormat {
- // These values are histogrammed over time; do not change their ordinal
- // values. When deleting a sample format replace it with a dummy value; when
- // adding a sample format, do so at the bottom before kSampleFormatMax, and
- // update the value of kSampleFormatMax.
- kUnknownSampleFormat = 0,
- kSampleFormatU8, // Unsigned 8-bit w/ bias of 128.
- kSampleFormatS16, // Signed 16-bit.
- kSampleFormatS32, // Signed 32-bit.
- kSampleFormatF32, // Float 32-bit.
- kSampleFormatPlanarS16, // Signed 16-bit planar.
- kSampleFormatPlanarF32, // Float 32-bit planar.
-
- // Must always be equal to the largest value ever logged.
- kSampleFormatMax = kSampleFormatPlanarF32,
-};
-
-// Returns the number of bytes used per channel for the specified
-// |sample_format|.
-MEDIA_EXPORT int SampleFormatToBytesPerChannel(SampleFormat sample_format);
-
-// Returns the name of the sample format as a string
-MEDIA_EXPORT const char* SampleFormatToString(SampleFormat sample_format);
-
-} // namespace media
-
-#endif // MEDIA_BASE_SAMPLE_FORMAT_H
-
diff --git a/src/media/base/seekable_buffer.cc b/src/media/base/seekable_buffer.cc
deleted file mode 100644
index 48c0858..0000000
--- a/src/media/base/seekable_buffer.cc
+++ /dev/null
@@ -1,277 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "media/base/seekable_buffer.h"
-
-#include <algorithm>
-
-#include "base/logging.h"
-#include "media/base/data_buffer.h"
-
-namespace media {
-
-SeekableBuffer::SeekableBuffer(int backward_capacity,
- int forward_capacity)
- : current_buffer_offset_(0),
- backward_capacity_(backward_capacity),
- backward_bytes_(0),
- forward_capacity_(forward_capacity),
- forward_bytes_(0),
- current_time_(kNoTimestamp()) {
- current_buffer_ = buffers_.begin();
-}
-
-SeekableBuffer::~SeekableBuffer() {
-}
-
-void SeekableBuffer::Clear() {
- buffers_.clear();
- current_buffer_ = buffers_.begin();
- current_buffer_offset_ = 0;
- backward_bytes_ = 0;
- forward_bytes_ = 0;
- current_time_ = kNoTimestamp();
-}
-
-int SeekableBuffer::Read(uint8* data, int size) {
- DCHECK(data);
- return InternalRead(data, size, true, 0);
-}
-
-int SeekableBuffer::Peek(uint8* data, int size, int forward_offset) {
- DCHECK(data);
- return InternalRead(data, size, false, forward_offset);
-}
-
-bool SeekableBuffer::GetCurrentChunk(const uint8** data, int* size) const {
- BufferQueue::iterator current_buffer = current_buffer_;
- int current_buffer_offset = current_buffer_offset_;
- // Advance position if we are in the end of the current buffer.
- while (current_buffer != buffers_.end() &&
- current_buffer_offset >= (*current_buffer)->GetDataSize()) {
- ++current_buffer;
- current_buffer_offset = 0;
- }
- if (current_buffer == buffers_.end())
- return false;
- *data = (*current_buffer)->GetData() + current_buffer_offset;
- *size = (*current_buffer)->GetDataSize() - current_buffer_offset;
- return true;
-}
-
-bool SeekableBuffer::Append(Buffer* buffer_in) {
- if (buffers_.empty() && buffer_in->GetTimestamp() != kNoTimestamp()) {
- current_time_ = buffer_in->GetTimestamp();
- }
-
- // Since the forward capacity is only used to check the buffer-full
- // criterion, we always append data to the buffer.
- buffers_.push_back(scoped_refptr<Buffer>(buffer_in));
-
- // After we have written the first buffer, update |current_buffer_| to point
- // to it.
- if (current_buffer_ == buffers_.end()) {
- DCHECK_EQ(0, forward_bytes_);
- current_buffer_ = buffers_.begin();
- }
-
- // Update the |forward_bytes_| counter since we have more bytes.
- forward_bytes_ += buffer_in->GetDataSize();
-
- // Advise the user to stop appending if the number of forward bytes exceeds
- // the forward capacity. A false return value means the user should stop
- // appending more data to this buffer.
- if (forward_bytes_ >= forward_capacity_)
- return false;
- return true;
-}
-
-bool SeekableBuffer::Append(const uint8* data, int size) {
- if (size > 0) {
- DataBuffer* data_buffer = new DataBuffer(data, size);
- return Append(data_buffer);
- } else {
- // Return true if we have forward capacity.
- return forward_bytes_ < forward_capacity_;
- }
-}
-
-bool SeekableBuffer::Seek(int32 offset) {
- if (offset > 0)
- return SeekForward(offset);
- else if (offset < 0)
- return SeekBackward(-offset);
- return true;
-}
-
-bool SeekableBuffer::SeekForward(int size) {
- // Perform seeking forward only if we have enough bytes in the queue.
- if (size > forward_bytes_)
- return false;
-
- // Do a read of |size| bytes.
- int taken = InternalRead(NULL, size, true, 0);
- DCHECK_EQ(taken, size);
- return true;
-}
-
-bool SeekableBuffer::SeekBackward(int size) {
- if (size > backward_bytes_)
- return false;
- // Record the number of bytes taken.
- int taken = 0;
- // Loop until we have taken enough bytes to rewind by the desired |size|.
- while (taken < size) {
- // |current_buffer_| can never be invalid when we are in this loop. It can
- // only be invalid before any data is appended. The invalid case should be
- // handled by checks before we enter this loop.
- DCHECK(current_buffer_ != buffers_.end());
-
- // We try to consume at most |size| bytes in the backward direction. We also
- // have to account for the offset into the current buffer, so take the
- // minimum of the two to determine the number of bytes to take from the
- // current buffer.
- int consumed = std::min(size - taken, current_buffer_offset_);
-
- // Decreases the offset in the current buffer since we are rewinding.
- current_buffer_offset_ -= consumed;
-
- // Increase the amount of bytes taken in the backward direction. This
- // determines when to stop the loop.
- taken += consumed;
-
- // Forward bytes increases and backward bytes decreases by the amount
- // consumed in the current buffer.
- forward_bytes_ += consumed;
- backward_bytes_ -= consumed;
- DCHECK_GE(backward_bytes_, 0);
-
- // The buffer pointed to by the current iterator has been consumed. Move
- // the iterator backward so it points to the previous buffer.
- if (current_buffer_offset_ == 0) {
- if (current_buffer_ == buffers_.begin())
- break;
- // Move the iterator backward.
- --current_buffer_;
- // Set the offset into the current buffer to the buffer size, as we are
- // preparing to rewind in the next iteration.
- current_buffer_offset_ = (*current_buffer_)->GetDataSize();
- }
- }
-
- UpdateCurrentTime(current_buffer_, current_buffer_offset_);
-
- DCHECK_EQ(taken, size);
- return true;
-}
-
-void SeekableBuffer::EvictBackwardBuffers() {
- // Evict buffers from the front of the queue until the backward bytes fit
- // within the backward capacity or we reach the current buffer.
- while (backward_bytes_ > backward_capacity_) {
- BufferQueue::iterator i = buffers_.begin();
- if (i == current_buffer_)
- break;
- scoped_refptr<Buffer> buffer = *i;
- backward_bytes_ -= buffer->GetDataSize();
- DCHECK_GE(backward_bytes_, 0);
-
- buffers_.erase(i);
- }
-}
-
-int SeekableBuffer::InternalRead(uint8* data, int size,
- bool advance_position,
- int forward_offset) {
- // Counts how many bytes are actually read from the buffer queue.
- int taken = 0;
-
- BufferQueue::iterator current_buffer = current_buffer_;
- int current_buffer_offset = current_buffer_offset_;
-
- int bytes_to_skip = forward_offset;
- while (taken < size) {
- // |current_buffer| is valid once data has been appended to this buffer.
- if (current_buffer == buffers_.end())
- break;
-
- scoped_refptr<Buffer> buffer = *current_buffer;
-
- int remaining_bytes_in_buffer =
- buffer->GetDataSize() - current_buffer_offset;
-
- if (bytes_to_skip == 0) {
- // Find the right amount to copy from the current buffer referenced by
- // |buffer|. We shall copy no more than |size| bytes in total and each
- // single step copies no more than the current buffer size.
- int copied = std::min(size - taken, remaining_bytes_in_buffer);
-
- // |data| is NULL if we are seeking forward, so there's no need to copy.
- if (data)
- memcpy(data + taken, buffer->GetData() + current_buffer_offset, copied);
-
- // Increase total number of bytes copied, which regulates when to end this
- // loop.
- taken += copied;
-
- // We have read |copied| bytes from the current buffer. Advances the
- // offset.
- current_buffer_offset += copied;
- } else {
- int skipped = std::min(remaining_bytes_in_buffer, bytes_to_skip);
- current_buffer_offset += skipped;
- bytes_to_skip -= skipped;
- }
-
- // The buffer has been consumed.
- if (current_buffer_offset == buffer->GetDataSize()) {
- if (advance_position) {
- // Next buffer may not have timestamp, so we need to update current
- // timestamp before switching to the next buffer.
- UpdateCurrentTime(current_buffer, current_buffer_offset);
- }
-
- BufferQueue::iterator next = current_buffer;
- ++next;
- // If we are at the last buffer, don't advance.
- if (next == buffers_.end())
- break;
-
- // Advances the iterator.
- current_buffer = next;
- current_buffer_offset = 0;
- }
- }
-
- if (advance_position) {
- // We now have fewer forward bytes and more backward bytes. Update these
- // counters by |taken|.
- forward_bytes_ -= taken;
- backward_bytes_ += taken;
- DCHECK_GE(forward_bytes_, 0);
- DCHECK(current_buffer_ != buffers_.end() || forward_bytes_ == 0);
-
- current_buffer_ = current_buffer;
- current_buffer_offset_ = current_buffer_offset;
-
- UpdateCurrentTime(current_buffer_, current_buffer_offset_);
- EvictBackwardBuffers();
- }
-
- return taken;
-}
-
-void SeekableBuffer::UpdateCurrentTime(BufferQueue::iterator buffer,
- int offset) {
- // Not every buffer carries a timestamp, so only update when one is set.
- if (buffer != buffers_.end() && (*buffer)->GetTimestamp() != kNoTimestamp()) {
- int64 time_offset = ((*buffer)->GetDuration().InMicroseconds() *
- offset) / (*buffer)->GetDataSize();
-
- current_time_ = (*buffer)->GetTimestamp() +
- base::TimeDelta::FromMicroseconds(time_offset);
- }
-}
-
-} // namespace media
diff --git a/src/media/base/seekable_buffer.h b/src/media/base/seekable_buffer.h
deleted file mode 100644
index 0a3ff72..0000000
--- a/src/media/base/seekable_buffer.h
+++ /dev/null
@@ -1,182 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// SeekableBuffer to support backward and forward seeking in a buffer for
-// reading a media data source.
-//
-// In order to support backward and forward seeking, this class buffers data in
-// both backward and forward directions; the current read position can be reset
-// to anywhere in the buffered data.
-//
-// The amount of data buffered is regulated by two variables at construction,
-// |backward_capacity| and |forward_capacity|.
-//
-// In the case of reading and seeking forward, the current read position
-// advances and there will be more data in the backward direction. If the
-// backward bytes exceed |backward_capacity|, the excess bytes are evicted, so
-// backward_bytes() will always be less than or equal to |backward_capacity|.
-// The eviction is triggered by Read() and Seek() in the forward direction and
-// is done internally when this condition is met.
-//
-// In the case of appending data to the buffer, there is an advisory limit on
-// how many bytes can be kept in the forward direction, regulated by
-// |forward_capacity|. An Append() call that causes the forward bytes to exceed
-// |forward_capacity| returns a value advising a halt to appending; further
-// append operations are allowed but are not advised. Since this class is used
-// as a backend buffer for caching media files downloaded from the network, we
-// cannot afford to lose data; we can only advise a halt of further writing to
-// this buffer.
-// This class is not inherently thread-safe. Concurrent access must be
-// externally serialized.
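-//
-// A minimal usage sketch (illustrative only; |data| and |size| are assumed to
-// come from the caller):
-//
-//   media::SeekableBuffer buffer(1024, 4096);  // 1 KB backward, 4 KB forward.
-//   bool keep_appending = buffer.Append(data, size);
-//   uint8 scratch[256];
-//   int read = buffer.Read(scratch, sizeof(scratch));
-//   buffer.Seek(-read);  // Rewind; succeeds while within backward_bytes().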
-
-#ifndef MEDIA_BASE_SEEKABLE_BUFFER_H_
-#define MEDIA_BASE_SEEKABLE_BUFFER_H_
-
-#include <list>
-
-#include "base/basictypes.h"
-#include "base/memory/ref_counted.h"
-#include "media/base/buffers.h"
-
-namespace media {
-
-class MEDIA_EXPORT SeekableBuffer {
- public:
- // Constructs an instance with |forward_capacity| and |backward_capacity|.
- // The values are in bytes.
- SeekableBuffer(int backward_capacity, int forward_capacity);
-
- ~SeekableBuffer();
-
- // Clears the buffer queue.
- void Clear();
-
- // Reads a maximum of |size| bytes into |data| from the current read
- // position. Returns the number of bytes read.
- // The current read position will advance by the amount of bytes read. If
- // reading caused backward_bytes() to exceed backward_capacity(), an eviction
- // of the backward buffer will be done internally.
- int Read(uint8* data, int size);
-
- // Copies up to |size| bytes from the current position to |data|. Returns the
- // number of bytes copied. Does not advance the current position. Optionally
- // starts at a |forward_offset| from the current position.
- int Peek(uint8* data, int size) { return Peek(data, size, 0); }
- int Peek(uint8* data, int size, int forward_offset);
-
- // Returns a pointer to the current chunk of data that is being consumed.
- // If there is no data left in the buffer, false is returned; otherwise
- // true is returned and |data| and |size| are updated. The returned
- // |data| value becomes invalid when Read(), Append() or Seek()
- // are called.
- bool GetCurrentChunk(const uint8** data, int* size) const;
-
- // Appends |buffer_in| to this buffer. Returns false if forward_bytes() is
- // greater than or equal to forward_capacity(), true otherwise. The data
- // is added to the buffer in any case.
- bool Append(Buffer* buffer_in);
-
- // Appends |size| bytes of |data| to the buffer. Result is the same
- // as for Append(Buffer*).
- bool Append(const uint8* data, int size);
-
- // Moves the read position by |offset| bytes. If |offset| is positive, the
- // current read position is moved forward. If negative, the current read
- // position is moved backward. A zero |offset| value will keep the current
- // read position stationary.
- // If |offset| exceeds bytes buffered in either direction, reported by
- // forward_bytes() when seeking forward and backward_bytes() when seeking
- // backward, the seek operation will fail and the return value will be false.
- // If the seek operation fails, the current read position will not be updated.
- // If a forward seek causes backward_bytes() to exceed backward_capacity(),
- // this method call will cause an eviction of the backward buffer.
- bool Seek(int32 offset);
-
- // Returns the number of bytes buffered beyond the current read position.
- int forward_bytes() const { return forward_bytes_; }
-
- // Returns the number of bytes buffered that precedes the current read
- // position.
- int backward_bytes() const { return backward_bytes_; }
-
- // Sets the forward_capacity to |new_forward_capacity| bytes.
- void set_forward_capacity(int new_forward_capacity) {
- forward_capacity_ = new_forward_capacity;
- }
-
- // Sets the backward_capacity to |new_backward_capacity| bytes.
- void set_backward_capacity(int new_backward_capacity) {
- backward_capacity_ = new_backward_capacity;
- }
-
- // Returns the maximum number of bytes that should be kept in the forward
- // direction.
- int forward_capacity() const { return forward_capacity_; }
-
- // Returns the maximum number of bytes that should be kept in the backward
- // direction.
- int backward_capacity() const { return backward_capacity_; }
-
-  // Returns the current timestamp, taking into account the current offset. The
-  // value is calculated based on the timestamp of the current buffer. If the
-  // timestamp of the current buffer is set to 0, or the data was added with
-  // Append(const uint8*, int), then the returned value corresponds to the
-  // last position in a buffer that had a timestamp set.
-  // kNoTimestamp() is returned if no buffer we read from had a timestamp set.
- base::TimeDelta current_time() const { return current_time_; }
-
- private:
- // Definition of the buffer queue.
- typedef std::list<scoped_refptr<Buffer> > BufferQueue;
-
- // A helper method to evict buffers in the backward direction until backward
- // bytes is within the backward capacity.
- void EvictBackwardBuffers();
-
- // An internal method shared by Read() and SeekForward() that actually does
- // reading. It reads a maximum of |size| bytes into |data|. Returns the number
- // of bytes read. The current read position will be moved forward by the
- // number of bytes read. If |data| is NULL, only the current read position
- // will advance but no data will be copied.
- int InternalRead(
- uint8* data, int size, bool advance_position, int forward_offset);
-
- // A helper method that moves the current read position forward by |size|
- // bytes.
- // If the return value is true, the operation completed successfully.
- // If the return value is false, |size| is greater than forward_bytes() and
- // the seek operation failed. The current read position is not updated.
- bool SeekForward(int size);
-
- // A helper method that moves the current read position backward by |size|
- // bytes.
- // If the return value is true, the operation completed successfully.
- // If the return value is false, |size| is greater than backward_bytes() and
- // the seek operation failed. The current read position is not updated.
- bool SeekBackward(int size);
-
- // Updates |current_time_| with the time that corresponds to the
- // specified position in the buffer.
- void UpdateCurrentTime(BufferQueue::iterator buffer, int offset);
-
- BufferQueue::iterator current_buffer_;
- BufferQueue buffers_;
- int current_buffer_offset_;
-
- int backward_capacity_;
- int backward_bytes_;
-
- int forward_capacity_;
- int forward_bytes_;
-
- // Keeps track of the most recent time we've seen in case the |buffers_| is
- // empty when our owner asks what time it is.
- base::TimeDelta current_time_;
-
- DISALLOW_COPY_AND_ASSIGN(SeekableBuffer);
-};
-
-} // namespace media
-
-#endif // MEDIA_BASE_SEEKABLE_BUFFER_H_
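
A minimal caller-side sketch of the advisory capacity semantics documented in
the header above, assuming only the SeekableBuffer API declared there; the
helper functions below (CacheDownloadedChunk, ConsumeForPlayback) are
illustrative names, not part of this change.

  #include "media/base/seekable_buffer.h"

  // Appending never drops data; a false return from Append() merely advises
  // the caller to pause further writes until forward_bytes() drains.
  void CacheDownloadedChunk(media::SeekableBuffer* buffer,
                            const uint8* chunk, int chunk_size) {
    bool keep_appending = buffer->Append(chunk, chunk_size);
    if (!keep_appending) {
      // e.g. defer the next network read until playback catches up.
    }
  }

  // Reading advances the read position; bytes left behind count as backward
  // bytes and are evicted internally once backward_capacity() is exceeded.
  void ConsumeForPlayback(media::SeekableBuffer* buffer, uint8* out, int size) {
    int bytes_read = buffer->Read(out, size);
    (void)bytes_read;  // A short read just means less data was buffered.
  }
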
diff --git a/src/media/base/seekable_buffer_unittest.cc b/src/media/base/seekable_buffer_unittest.cc
deleted file mode 100644
index 38d36a6..0000000
--- a/src/media/base/seekable_buffer_unittest.cc
+++ /dev/null
@@ -1,352 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/logging.h"
-#include "base/memory/scoped_ptr.h"
-#include "base/time.h"
-#include "media/base/data_buffer.h"
-#include "media/base/seekable_buffer.h"
-#include "testing/gtest/include/gtest/gtest.h"
-
-namespace media {
-
-class SeekableBufferTest : public testing::Test {
- public:
- SeekableBufferTest() : buffer_(kBufferSize, kBufferSize) {
- }
-
- protected:
- static const int kDataSize = 409600;
- static const int kBufferSize = 4096;
- static const int kWriteSize = 512;
-
- virtual void SetUp() {
-    // Set up the random seed.
- int seed = static_cast<int32>(base::Time::Now().ToInternalValue());
- srand(seed);
- VLOG(1) << "Random seed: " << seed;
-
-    // Create test data.
- for (int i = 0; i < kDataSize; i++)
- data_[i] = static_cast<char>(rand());
- }
-
- int GetRandomInt(int maximum) {
- return rand() % maximum + 1;
- }
-
- SeekableBuffer buffer_;
- uint8 data_[kDataSize];
- uint8 write_buffer_[kDataSize];
-};
-
-TEST_F(SeekableBufferTest, RandomReadWrite) {
- int write_position = 0;
- int read_position = 0;
- while (read_position < kDataSize) {
- // Write a random amount of data.
- int write_size = GetRandomInt(kBufferSize);
- write_size = std::min(write_size, kDataSize - write_position);
- bool should_append = buffer_.Append(data_ + write_position, write_size);
- write_position += write_size;
- EXPECT_GE(write_position, read_position);
- EXPECT_EQ(write_position - read_position, buffer_.forward_bytes());
- EXPECT_EQ(should_append, buffer_.forward_bytes() < kBufferSize)
- << "Incorrect buffer full reported";
-
- // Peek a random amount of data.
- int copy_size = GetRandomInt(kBufferSize);
- int bytes_copied = buffer_.Peek(write_buffer_, copy_size);
- EXPECT_GE(copy_size, bytes_copied);
- EXPECT_EQ(0, memcmp(write_buffer_, data_ + read_position, bytes_copied));
-
- // Read a random amount of data.
- int read_size = GetRandomInt(kBufferSize);
- int bytes_read = buffer_.Read(write_buffer_, read_size);
- EXPECT_GE(read_size, bytes_read);
- EXPECT_EQ(0, memcmp(write_buffer_, data_ + read_position, bytes_read));
- read_position += bytes_read;
- EXPECT_GE(write_position, read_position);
- EXPECT_EQ(write_position - read_position, buffer_.forward_bytes());
- }
-}
-
-TEST_F(SeekableBufferTest, ReadWriteSeek) {
- const int kReadSize = kWriteSize / 4;
-
- for (int i = 0; i < 10; ++i) {
- // Write until buffer is full.
- for (int j = 0; j < kBufferSize; j += kWriteSize) {
- bool should_append = buffer_.Append(data_ + j, kWriteSize);
- EXPECT_EQ(j < kBufferSize - kWriteSize, should_append)
- << "Incorrect buffer full reported";
- EXPECT_EQ(j + kWriteSize, buffer_.forward_bytes());
- }
-
- // Simulate a read and seek pattern. Each loop reads 4 times, each time
- // reading a quarter of |kWriteSize|.
- int read_position = 0;
- int forward_bytes = kBufferSize;
- for (int j = 0; j < kBufferSize; j += kWriteSize) {
- // Read.
- EXPECT_EQ(kReadSize, buffer_.Read(write_buffer_, kReadSize));
- forward_bytes -= kReadSize;
- EXPECT_EQ(forward_bytes, buffer_.forward_bytes());
- EXPECT_EQ(0, memcmp(write_buffer_, data_ + read_position, kReadSize));
- read_position += kReadSize;
-
- // Seek forward.
- EXPECT_TRUE(buffer_.Seek(2 * kReadSize));
- forward_bytes -= 2 * kReadSize;
- read_position += 2 * kReadSize;
- EXPECT_EQ(forward_bytes, buffer_.forward_bytes());
-
- // Copy.
- EXPECT_EQ(kReadSize, buffer_.Peek(write_buffer_, kReadSize));
- EXPECT_EQ(forward_bytes, buffer_.forward_bytes());
- EXPECT_EQ(0, memcmp(write_buffer_, data_ + read_position, kReadSize));
-
- // Read.
- EXPECT_EQ(kReadSize, buffer_.Read(write_buffer_, kReadSize));
- forward_bytes -= kReadSize;
- EXPECT_EQ(forward_bytes, buffer_.forward_bytes());
- EXPECT_EQ(0, memcmp(write_buffer_, data_ + read_position, kReadSize));
- read_position += kReadSize;
-
- // Seek backward.
- EXPECT_TRUE(buffer_.Seek(-3 * static_cast<int32>(kReadSize)));
- forward_bytes += 3 * kReadSize;
- read_position -= 3 * kReadSize;
- EXPECT_EQ(forward_bytes, buffer_.forward_bytes());
-
- // Copy.
- EXPECT_EQ(kReadSize, buffer_.Peek(write_buffer_, kReadSize));
- EXPECT_EQ(forward_bytes, buffer_.forward_bytes());
- EXPECT_EQ(0, memcmp(write_buffer_, data_ + read_position, kReadSize));
-
- // Read.
- EXPECT_EQ(kReadSize, buffer_.Read(write_buffer_, kReadSize));
- forward_bytes -= kReadSize;
- EXPECT_EQ(forward_bytes, buffer_.forward_bytes());
- EXPECT_EQ(0, memcmp(write_buffer_, data_ + read_position, kReadSize));
- read_position += kReadSize;
-
- // Copy.
- EXPECT_EQ(kReadSize, buffer_.Peek(write_buffer_, kReadSize));
- EXPECT_EQ(forward_bytes, buffer_.forward_bytes());
- EXPECT_EQ(0, memcmp(write_buffer_, data_ + read_position, kReadSize));
-
- // Read.
- EXPECT_EQ(kReadSize, buffer_.Read(write_buffer_, kReadSize));
- forward_bytes -= kReadSize;
- EXPECT_EQ(forward_bytes, buffer_.forward_bytes());
- EXPECT_EQ(0, memcmp(write_buffer_, data_ + read_position, kReadSize));
- read_position += kReadSize;
-
- // Seek forward.
- EXPECT_TRUE(buffer_.Seek(kReadSize));
- forward_bytes -= kReadSize;
- read_position += kReadSize;
- EXPECT_EQ(forward_bytes, buffer_.forward_bytes());
- }
- }
-}
-
-TEST_F(SeekableBufferTest, BufferFull) {
- const int kMaxWriteSize = 2 * kBufferSize;
-
- // Write and expect the buffer to be not full.
- for (int i = 0; i < kBufferSize - kWriteSize; i += kWriteSize) {
- EXPECT_TRUE(buffer_.Append(data_ + i, kWriteSize));
- EXPECT_EQ(i + kWriteSize, buffer_.forward_bytes());
- }
-
- // Write until we have kMaxWriteSize bytes in the buffer. Buffer is full in
- // these writes.
- for (int i = buffer_.forward_bytes(); i < kMaxWriteSize; i += kWriteSize) {
- EXPECT_FALSE(buffer_.Append(data_ + i, kWriteSize));
- EXPECT_EQ(i + kWriteSize, buffer_.forward_bytes());
- }
-
- // Read until the buffer is empty.
- int read_position = 0;
- while (buffer_.forward_bytes()) {
- // Read a random amount of data.
- int read_size = GetRandomInt(kBufferSize);
- int forward_bytes = buffer_.forward_bytes();
- int bytes_read = buffer_.Read(write_buffer_, read_size);
- EXPECT_EQ(0, memcmp(write_buffer_, data_ + read_position, bytes_read));
- if (read_size > forward_bytes)
- EXPECT_EQ(forward_bytes, bytes_read);
- else
- EXPECT_EQ(read_size, bytes_read);
- read_position += bytes_read;
- EXPECT_GE(kMaxWriteSize, read_position);
- EXPECT_EQ(kMaxWriteSize - read_position, buffer_.forward_bytes());
- }
-
- // Expects we have no bytes left.
- EXPECT_EQ(0, buffer_.forward_bytes());
- EXPECT_EQ(0, buffer_.Read(write_buffer_, 1));
-}
-
-TEST_F(SeekableBufferTest, SeekBackward) {
- EXPECT_EQ(0, buffer_.forward_bytes());
- EXPECT_EQ(0, buffer_.backward_bytes());
- EXPECT_FALSE(buffer_.Seek(1));
- EXPECT_FALSE(buffer_.Seek(-1));
-
- const int kReadSize = 256;
-
- // Write into buffer until it's full.
- for (int i = 0; i < kBufferSize; i += kWriteSize) {
- // Write a random amount of data.
- buffer_.Append(data_ + i, kWriteSize);
- }
-
- // Read until buffer is empty.
- for (int i = 0; i < kBufferSize; i += kReadSize) {
- EXPECT_EQ(kReadSize, buffer_.Read(write_buffer_, kReadSize));
- EXPECT_EQ(0, memcmp(write_buffer_, data_ + i, kReadSize));
- }
-
- // Seek backward.
- EXPECT_TRUE(buffer_.Seek(-static_cast<int32>(kBufferSize)));
- EXPECT_FALSE(buffer_.Seek(-1));
-
- // Read again.
- for (int i = 0; i < kBufferSize; i += kReadSize) {
- EXPECT_EQ(kReadSize, buffer_.Read(write_buffer_, kReadSize));
- EXPECT_EQ(0, memcmp(write_buffer_, data_ + i, kReadSize));
- }
-}
-
-TEST_F(SeekableBufferTest, GetCurrentChunk) {
- const int kSeekSize = kWriteSize / 3;
-
- scoped_refptr<DataBuffer> buffer(new DataBuffer(data_, kWriteSize));
-
- const uint8* data;
- int size;
- EXPECT_FALSE(buffer_.GetCurrentChunk(&data, &size));
-
- buffer_.Append(buffer.get());
- EXPECT_TRUE(buffer_.GetCurrentChunk(&data, &size));
- EXPECT_EQ(data, buffer->GetData());
- EXPECT_EQ(size, buffer->GetDataSize());
-
- buffer_.Seek(kSeekSize);
- EXPECT_TRUE(buffer_.GetCurrentChunk(&data, &size));
- EXPECT_EQ(data, buffer->GetData() + kSeekSize);
- EXPECT_EQ(size, buffer->GetDataSize() - kSeekSize);
-}
-
-TEST_F(SeekableBufferTest, SeekForward) {
- int write_position = 0;
- int read_position = 0;
- while (read_position < kDataSize) {
- for (int i = 0; i < 10 && write_position < kDataSize; ++i) {
- // Write a random amount of data.
- int write_size = GetRandomInt(kBufferSize);
- write_size = std::min(write_size, kDataSize - write_position);
-
- bool should_append = buffer_.Append(data_ + write_position, write_size);
- write_position += write_size;
- EXPECT_GE(write_position, read_position);
- EXPECT_EQ(write_position - read_position, buffer_.forward_bytes());
- EXPECT_EQ(should_append, buffer_.forward_bytes() < kBufferSize)
- << "Incorrect buffer full status reported";
- }
-
-    // Seek forward by a random amount.
- int seek_size = GetRandomInt(kBufferSize);
- if (buffer_.Seek(seek_size))
- read_position += seek_size;
- EXPECT_GE(write_position, read_position);
- EXPECT_EQ(write_position - read_position, buffer_.forward_bytes());
-
- // Read a random amount of data.
- int read_size = GetRandomInt(kBufferSize);
- int bytes_read = buffer_.Read(write_buffer_, read_size);
- EXPECT_GE(read_size, bytes_read);
- EXPECT_EQ(0, memcmp(write_buffer_, data_ + read_position, bytes_read));
- read_position += bytes_read;
- EXPECT_GE(write_position, read_position);
- EXPECT_EQ(write_position - read_position, buffer_.forward_bytes());
- }
-}
-
-TEST_F(SeekableBufferTest, AllMethods) {
- EXPECT_EQ(0, buffer_.Read(write_buffer_, 0));
- EXPECT_EQ(0, buffer_.Read(write_buffer_, 1));
- EXPECT_TRUE(buffer_.Seek(0));
- EXPECT_FALSE(buffer_.Seek(-1));
- EXPECT_FALSE(buffer_.Seek(1));
- EXPECT_EQ(0, buffer_.forward_bytes());
- EXPECT_EQ(0, buffer_.backward_bytes());
-}
-
-TEST_F(SeekableBufferTest, GetTime) {
- const int64 kNoTS = kNoTimestamp().ToInternalValue();
- const struct {
- int64 first_time_useconds;
- int64 duration_useconds;
- int consume_bytes;
- int64 expected_time;
- } tests[] = {
- { kNoTS, 1000000, 0, kNoTS },
- { kNoTS, 4000000, 0, kNoTS },
- { kNoTS, 8000000, 0, kNoTS },
- { kNoTS, 1000000, kWriteSize / 2, kNoTS },
- { kNoTS, 4000000, kWriteSize / 2, kNoTS },
- { kNoTS, 8000000, kWriteSize / 2, kNoTS },
- { kNoTS, 1000000, kWriteSize, kNoTS },
- { kNoTS, 4000000, kWriteSize, kNoTS },
- { kNoTS, 8000000, kWriteSize, kNoTS },
- { 0, 1000000, 0, 0 },
- { 0, 4000000, 0, 0 },
- { 0, 8000000, 0, 0 },
- { 0, 1000000, kWriteSize / 2, 500000 },
- { 0, 4000000, kWriteSize / 2, 2000000 },
- { 0, 8000000, kWriteSize / 2, 4000000 },
- { 0, 1000000, kWriteSize, 1000000 },
- { 0, 4000000, kWriteSize, 4000000 },
- { 0, 8000000, kWriteSize, 8000000 },
- { 5, 1000000, 0, 5 },
- { 5, 4000000, 0, 5 },
- { 5, 8000000, 0, 5 },
- { 5, 1000000, kWriteSize / 2, 500005 },
- { 5, 4000000, kWriteSize / 2, 2000005 },
- { 5, 8000000, kWriteSize / 2, 4000005 },
- { 5, 1000000, kWriteSize, 1000005 },
- { 5, 4000000, kWriteSize, 4000005 },
- { 5, 8000000, kWriteSize, 8000005 },
- };
-
- // current_time() must initially return kNoTimestamp().
- EXPECT_EQ(kNoTimestamp().ToInternalValue(),
- buffer_.current_time().ToInternalValue());
-
- scoped_refptr<DataBuffer> buffer(new DataBuffer(data_, kWriteSize));
-
- for (size_t i = 0; i < ARRAYSIZE_UNSAFE(tests); ++i) {
- buffer->SetTimestamp(base::TimeDelta::FromMicroseconds(
- tests[i].first_time_useconds));
- buffer->SetDuration(base::TimeDelta::FromMicroseconds(
- tests[i].duration_useconds));
- buffer_.Append(buffer.get());
- EXPECT_TRUE(buffer_.Seek(tests[i].consume_bytes));
-
- int64 actual = buffer_.current_time().ToInternalValue();
-
- EXPECT_EQ(tests[i].expected_time, actual) << "With test = { start:"
- << tests[i].first_time_useconds << ", duration:"
- << tests[i].duration_useconds << ", consumed:"
- << tests[i].consume_bytes << " }\n";
-
- buffer_.Clear();
- }
-}
-
-} // namespace media
diff --git a/src/media/base/serial_runner.cc b/src/media/base/serial_runner.cc
deleted file mode 100644
index 9d6c6ed..0000000
--- a/src/media/base/serial_runner.cc
+++ /dev/null
@@ -1,90 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "media/base/serial_runner.h"
-
-#include "base/bind.h"
-#include "base/callback_helpers.h"
-#include "base/message_loop.h"
-#include "base/message_loop_proxy.h"
-
-namespace media {
-
-// Converts a bound function accepting a Closure into a bound function
-// accepting a PipelineStatusCB. Since closures have no way of reporting a
-// status, |status_cb| is executed with PIPELINE_OK.
-static void RunBoundClosure(
- const SerialRunner::BoundClosure& bound_closure,
- const PipelineStatusCB& status_cb) {
- bound_closure.Run(base::Bind(status_cb, PIPELINE_OK));
-}
-
-// Runs |status_cb| with |last_status| on |message_loop|.
-static void RunOnMessageLoop(
- const scoped_refptr<base::MessageLoopProxy>& message_loop,
- const PipelineStatusCB& status_cb,
- PipelineStatus last_status) {
- // Force post to permit cancellation of a series in the scenario where all
- // bound functions run on the same thread.
- message_loop->PostTask(FROM_HERE, base::Bind(status_cb, last_status));
-}
-
-SerialRunner::Queue::Queue() {}
-SerialRunner::Queue::~Queue() {}
-
-void SerialRunner::Queue::Push(
- const BoundClosure& bound_closure) {
- bound_fns_.push(base::Bind(&RunBoundClosure, bound_closure));
-}
-
-void SerialRunner::Queue::Push(
- const BoundPipelineStatusCB& bound_status_cb) {
- bound_fns_.push(bound_status_cb);
-}
-
-SerialRunner::BoundPipelineStatusCB SerialRunner::Queue::Pop() {
- BoundPipelineStatusCB bound_fn = bound_fns_.front();
- bound_fns_.pop();
- return bound_fn;
-}
-
-bool SerialRunner::Queue::empty() {
- return bound_fns_.empty();
-}
-
-SerialRunner::SerialRunner(
- const Queue& bound_fns, const PipelineStatusCB& done_cb)
- : weak_this_(this),
- message_loop_(base::MessageLoopProxy::current()),
- bound_fns_(bound_fns),
- done_cb_(done_cb) {
- message_loop_->PostTask(FROM_HERE, base::Bind(
- &SerialRunner::RunNextInSeries, weak_this_.GetWeakPtr(),
- PIPELINE_OK));
-}
-
-SerialRunner::~SerialRunner() {}
-
-scoped_ptr<SerialRunner> SerialRunner::Run(
- const Queue& bound_fns, const PipelineStatusCB& done_cb) {
- scoped_ptr<SerialRunner> callback_series(
- new SerialRunner(bound_fns, done_cb));
- return callback_series.Pass();
-}
-
-void SerialRunner::RunNextInSeries(PipelineStatus last_status) {
- DCHECK(message_loop_->BelongsToCurrentThread());
- DCHECK(!done_cb_.is_null());
-
- if (bound_fns_.empty() || last_status != PIPELINE_OK) {
- base::ResetAndReturn(&done_cb_).Run(last_status);
- return;
- }
-
- BoundPipelineStatusCB bound_fn = bound_fns_.Pop();
- bound_fn.Run(base::Bind(&RunOnMessageLoop, message_loop_, base::Bind(
- &SerialRunner::RunNextInSeries, weak_this_.GetWeakPtr())));
-}
-
-} // namespace media
diff --git a/src/media/base/serial_runner.h b/src/media/base/serial_runner.h
deleted file mode 100644
index 16fa6f3..0000000
--- a/src/media/base/serial_runner.h
+++ /dev/null
@@ -1,76 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef MEDIA_BASE_SERIAL_RUNNER_H_
-#define MEDIA_BASE_SERIAL_RUNNER_H_
-
-#include <queue>
-
-#include "base/callback.h"
-#include "base/memory/ref_counted.h"
-#include "base/memory/scoped_ptr.h"
-#include "base/memory/weak_ptr.h"
-#include "media/base/pipeline_status.h"
-
-namespace base {
-class MessageLoopProxy;
-}
-
-namespace media {
-
-// Runs a series of bound functions accepting Closures or PipelineStatusCB.
-// SerialRunner doesn't use regular Closure/PipelineStatusCBs because it
-// late-binds the completion callback as the series progresses.
-class SerialRunner {
- public:
- typedef base::Callback<void(const base::Closure&)> BoundClosure;
- typedef base::Callback<void(const PipelineStatusCB&)> BoundPipelineStatusCB;
-
- // Serial queue of bound functions to run.
- class Queue {
- public:
- Queue();
- ~Queue();
-
- void Push(const BoundClosure& bound_fn);
- void Push(const BoundPipelineStatusCB& bound_fn);
-
- private:
- friend class SerialRunner;
-
- BoundPipelineStatusCB Pop();
- bool empty();
-
- std::queue<BoundPipelineStatusCB> bound_fns_;
- };
-
- // Executes the bound functions in series, executing |done_cb| when finished.
- //
- // All bound functions are executed on the thread that Run() is called on,
- // including |done_cb|.
- //
- // Deleting the object will prevent execution of any unstarted bound
- // functions, including |done_cb|.
- static scoped_ptr<SerialRunner> Run(
- const Queue& bound_fns, const PipelineStatusCB& done_cb);
-
- private:
- friend class scoped_ptr<SerialRunner>;
-
- SerialRunner(const Queue& bound_fns, const PipelineStatusCB& done_cb);
- ~SerialRunner();
-
- void RunNextInSeries(PipelineStatus last_status);
-
- base::WeakPtrFactory<SerialRunner> weak_this_;
- scoped_refptr<base::MessageLoopProxy> message_loop_;
- Queue bound_fns_;
- PipelineStatusCB done_cb_;
-
- DISALLOW_COPY_AND_ASSIGN(SerialRunner);
-};
-
-} // namespace media
-
-#endif // MEDIA_BASE_SERIAL_RUNNER_H_
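
A minimal sketch of driving the SerialRunner API declared above: queue a few
bound steps and run them in series, with |done_cb| receiving the first
non-PIPELINE_OK status (or PIPELINE_OK when all steps succeed). It assumes a
MessageLoop is running on the calling thread; PipelineLikeObject and its
StartStepA/StartStepB/OnInitDone members are hypothetical.

  #include "base/bind.h"
  #include "media/base/serial_runner.h"

  namespace media {

  class PipelineLikeObject {
   public:
    void StartInitSequence() {
      SerialRunner::Queue bound_fns;
      // A step that signals completion through a plain Closure.
      bound_fns.Push(base::Bind(&PipelineLikeObject::StartStepA,
                                base::Unretained(this)));
      // A step that reports a PipelineStatus.
      bound_fns.Push(base::Bind(&PipelineLikeObject::StartStepB,
                                base::Unretained(this)));
      // Deleting |pending_callbacks_| cancels any unstarted steps.
      pending_callbacks_ = SerialRunner::Run(
          bound_fns, base::Bind(&PipelineLikeObject::OnInitDone,
                                base::Unretained(this)));
    }

   private:
    void StartStepA(const base::Closure& done_cb) { done_cb.Run(); }
    void StartStepB(const PipelineStatusCB& status_cb) {
      status_cb.Run(PIPELINE_OK);
    }
    void OnInitDone(PipelineStatus status) { /* React to |status|. */ }

    scoped_ptr<SerialRunner> pending_callbacks_;
  };

  }  // namespace media
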
diff --git a/src/media/base/shell_audio_bus.cc b/src/media/base/shell_audio_bus.cc
deleted file mode 100644
index 4b5ad5e..0000000
--- a/src/media/base/shell_audio_bus.cc
+++ /dev/null
@@ -1,359 +0,0 @@
-/*
- * Copyright 2015 Google Inc. All Rights Reserved.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "media/base/shell_audio_bus.h"
-
-#include <algorithm>
-#include <limits>
-
-namespace media {
-
-namespace {
-
-typedef ShellAudioBus::StorageType StorageType;
-typedef ShellAudioBus::SampleType SampleType;
-
-const float kFloat32ToInt16Factor = 32768.f;
-
-inline void ConvertSample(ShellAudioBus::SampleType src_type,
- const uint8* src_ptr,
- ShellAudioBus::SampleType dest_type,
- uint8* dest_ptr) {
- if (src_type == dest_type) {
- memcpy(dest_ptr, src_ptr,
- src_type == ShellAudioBus::kInt16 ? sizeof(int16) : sizeof(float));
- } else if (src_type == ShellAudioBus::kFloat32) {
- float sample_in_float = *reinterpret_cast<const float*>(src_ptr);
- int32 sample_in_int32 =
- static_cast<int32>(sample_in_float * kFloat32ToInt16Factor);
- sample_in_int32 =
- std::max<int32>(sample_in_int32, std::numeric_limits<int16>::min());
- sample_in_int32 =
- std::min<int32>(sample_in_int32, std::numeric_limits<int16>::max());
- *reinterpret_cast<int16*>(dest_ptr) = static_cast<int16>(sample_in_int32);
- } else {
- int16 sample = *reinterpret_cast<const int16*>(src_ptr);
- *reinterpret_cast<float*>(dest_ptr) =
- static_cast<float>(sample) / kFloat32ToInt16Factor;
- }
-}
-
-} // namespace
-
-ShellAudioBus::ShellAudioBus(size_t channels, size_t frames,
- SampleType sample_type, StorageType storage_type)
- : channels_(channels),
- frames_(frames),
- sample_type_(sample_type),
- storage_type_(storage_type) {
- DCHECK_GT(channels_, 0);
-
- if (storage_type_ == kInterleaved) {
- data_.reset(static_cast<uint8*>(base::AlignedAlloc(
- GetSampleSizeInBytes() * frames * channels, kChannelAlignmentInBytes)));
- channel_data_.push_back(data_.get());
- } else {
- DCHECK_EQ(storage_type_, kPlanar);
- size_t aligned_per_channel_size_in_bytes =
- (GetSampleSizeInBytes() * frames + kChannelAlignmentInBytes - 1) /
- kChannelAlignmentInBytes * kChannelAlignmentInBytes;
- data_.reset(static_cast<uint8*>(
- base::AlignedAlloc(aligned_per_channel_size_in_bytes * channels,
- kChannelAlignmentInBytes)));
- channel_data_.reserve(channels);
- for (size_t i = 0; i < channels_; ++i) {
- channel_data_.push_back(data_.get() +
- aligned_per_channel_size_in_bytes * i);
- }
- }
-}
-
-ShellAudioBus::ShellAudioBus(size_t frames, const std::vector<float*>& samples)
- : channels_(samples.size()),
- frames_(frames),
- sample_type_(kFloat32),
- storage_type_(kPlanar) {
- DCHECK_GT(channels_, 0);
-
- channel_data_.reserve(samples.size());
- for (size_t i = 0; i < samples.size(); ++i) {
- channel_data_.push_back(reinterpret_cast<uint8*>(samples[i]));
- }
-}
-
-ShellAudioBus::ShellAudioBus(size_t channels, size_t frames, float* samples)
- : channels_(channels),
- frames_(frames),
- sample_type_(kFloat32),
- storage_type_(kInterleaved) {
- DCHECK_GT(channels_, 0);
-
- channel_data_.push_back(reinterpret_cast<uint8*>(samples));
-}
-
-ShellAudioBus::ShellAudioBus(size_t frames, const std::vector<int16*>& samples)
- : channels_(samples.size()),
- frames_(frames),
- sample_type_(kInt16),
- storage_type_(kPlanar) {
- DCHECK_GT(channels_, 0);
-
- channel_data_.reserve(samples.size());
- for (size_t i = 0; i < samples.size(); ++i) {
- channel_data_.push_back(reinterpret_cast<uint8*>(samples[i]));
- }
-}
-
-ShellAudioBus::ShellAudioBus(size_t channels, size_t frames, int16* samples)
- : channels_(channels),
- frames_(frames),
- sample_type_(kInt16),
- storage_type_(kInterleaved) {
- DCHECK_GT(channels_, 0);
-
- channel_data_.push_back(reinterpret_cast<uint8*>(samples));
-}
-
-size_t ShellAudioBus::GetSampleSizeInBytes() const {
- if (sample_type_ == kInt16) {
- return sizeof(int16);
- }
- DCHECK_EQ(sample_type_, kFloat32);
- return sizeof(float);
-}
-
-const uint8* ShellAudioBus::interleaved_data() const {
- DCHECK_EQ(storage_type_, kInterleaved);
- return channel_data_[0];
-}
-
-const uint8* ShellAudioBus::planar_data(size_t channel) const {
- DCHECK_LT(channel, channels_);
- DCHECK_EQ(storage_type_, kPlanar);
- return channel_data_[channel];
-}
-
-void ShellAudioBus::ZeroFrames(size_t start_frame, size_t end_frame) {
- DCHECK_LE(start_frame, end_frame);
- DCHECK_LE(end_frame, frames_);
- end_frame = std::min(end_frame, frames_);
- start_frame = std::min(start_frame, end_frame);
- if (start_frame >= end_frame) {
- return;
- }
- if (storage_type_ == kInterleaved) {
- memset(GetSamplePtr(0, start_frame), 0,
- GetSampleSizeInBytes() * (end_frame - start_frame) * channels_);
- } else {
- for (size_t channel = 0; channel < channels_; ++channel) {
- memset(GetSamplePtr(channel, start_frame), 0,
- GetSampleSizeInBytes() * (end_frame - start_frame));
- }
- }
-}
-
-void ShellAudioBus::Assign(const ShellAudioBus& source) {
- DCHECK_EQ(channels_, source.channels_);
- if (channels_ != source.channels_) {
- ZeroAllFrames();
- return;
- }
-
- if (sample_type_ == source.sample_type_ &&
- storage_type_ == source.storage_type_) {
- size_t frames = std::min(frames_, source.frames_);
- if (storage_type_ == kInterleaved) {
- memcpy(GetSamplePtr(0, 0), source.GetSamplePtr(0, 0),
- GetSampleSizeInBytes() * frames * channels_);
- } else {
- for (size_t channel = 0; channel < channels_; ++channel) {
- memcpy(GetSamplePtr(channel, 0), source.GetSamplePtr(channel, 0),
- GetSampleSizeInBytes() * frames);
- }
- }
- return;
- }
-
- size_t frames = std::min(frames_, source.frames_);
- for (size_t channel = 0; channel < channels_; ++channel) {
- for (size_t frame = 0; frame < frames; ++frame) {
- ConvertSample(source.sample_type_, source.GetSamplePtr(channel, frame),
- sample_type_, GetSamplePtr(channel, frame));
- }
- }
-}
-
-void ShellAudioBus::Assign(const ShellAudioBus& source,
- const std::vector<float>& matrix) {
- DCHECK_EQ(channels() * source.channels(), matrix.size());
- DCHECK_EQ(sample_type_, kFloat32);
- DCHECK_EQ(source.sample_type_, kFloat32);
- if (channels() * source.channels() != matrix.size() ||
- sample_type_ != kFloat32 || source.sample_type_ != kFloat32) {
- ZeroAllFrames();
- return;
- }
-
- size_t frames = std::min(frames_, source.frames_);
- for (size_t dest_channel = 0; dest_channel < channels_; ++dest_channel) {
- for (size_t frame = 0; frame < frames; ++frame) {
- float mixed_sample = 0.f;
- for (size_t src_channel = 0; src_channel < source.channels_;
- ++src_channel) {
- mixed_sample += source.GetFloat32Sample(src_channel, frame) *
- matrix[dest_channel * source.channels_ + src_channel];
- }
- SetFloat32Sample(dest_channel, frame, mixed_sample);
- }
- }
-}
-
-template <typename SourceSampleType,
- typename DestSampleType,
- StorageType SourceStorageType,
- StorageType DestStorageType>
-void ShellAudioBus::MixForTypes(const ShellAudioBus& source) {
- const size_t frames = std::min(frames_, source.frames_);
-
- for (size_t channel = 0; channel < channels_; ++channel) {
- for (size_t frame = 0; frame < frames; ++frame) {
- *reinterpret_cast<DestSampleType*>(
- GetSamplePtrForType<DestSampleType, DestStorageType>(channel,
- frame)) +=
- source.GetSampleForType<SourceSampleType, SourceStorageType>(channel,
- frame);
- }
- }
-}
-
-void ShellAudioBus::Mix(const ShellAudioBus& source) {
- DCHECK_EQ(channels_, source.channels_);
-
- if (channels_ != source.channels_) {
- ZeroAllFrames();
- return;
- }
-
- // Profiling has identified this area of code as hot, so instead of calling
- // GetSamplePtr, which branches each time it is called, we branch once
- // before we loop and inline the branch of the function we want.
- if (source.sample_type_ == kInt16 && sample_type_ == kInt16 &&
- source.storage_type_ == kInterleaved && storage_type_ == kInterleaved) {
- MixForTypes<int16, int16, kInterleaved, kInterleaved>(source);
- } else if (source.sample_type_ == kInt16 && sample_type_ == kInt16 &&
- source.storage_type_ == kInterleaved && storage_type_ == kPlanar) {
- MixForTypes<int16, int16, kInterleaved, kPlanar>(source);
- } else if (source.sample_type_ == kInt16 && sample_type_ == kInt16 &&
- source.storage_type_ == kPlanar && storage_type_ == kInterleaved) {
- MixForTypes<int16, int16, kPlanar, kInterleaved>(source);
- } else if (source.sample_type_ == kInt16 && sample_type_ == kInt16 &&
- source.storage_type_ == kPlanar && storage_type_ == kPlanar) {
- MixForTypes<int16, int16, kPlanar, kPlanar>(source);
- } else if (source.sample_type_ == kInt16 && sample_type_ == kFloat32 &&
- source.storage_type_ == kInterleaved &&
- storage_type_ == kInterleaved) {
- MixForTypes<int16, float, kInterleaved, kInterleaved>(source);
- } else if (source.sample_type_ == kInt16 && sample_type_ == kFloat32 &&
- source.storage_type_ == kInterleaved && storage_type_ == kPlanar) {
- MixForTypes<int16, float, kInterleaved, kPlanar>(source);
- } else if (source.sample_type_ == kInt16 && sample_type_ == kFloat32 &&
- source.storage_type_ == kPlanar && storage_type_ == kInterleaved) {
- MixForTypes<int16, float, kPlanar, kInterleaved>(source);
- } else if (source.sample_type_ == kInt16 && sample_type_ == kFloat32 &&
- source.storage_type_ == kPlanar && storage_type_ == kPlanar) {
- MixForTypes<int16, float, kPlanar, kPlanar>(source);
- } else if (source.sample_type_ == kFloat32 && sample_type_ == kInt16 &&
- source.storage_type_ == kInterleaved &&
- storage_type_ == kInterleaved) {
- MixForTypes<float, int16, kInterleaved, kInterleaved>(source);
- } else if (source.sample_type_ == kFloat32 && sample_type_ == kInt16 &&
- source.storage_type_ == kInterleaved && storage_type_ == kPlanar) {
- MixForTypes<float, int16, kInterleaved, kPlanar>(source);
- } else if (source.sample_type_ == kFloat32 && sample_type_ == kInt16 &&
- source.storage_type_ == kPlanar && storage_type_ == kInterleaved) {
- MixForTypes<float, int16, kPlanar, kInterleaved>(source);
- } else if (source.sample_type_ == kFloat32 && sample_type_ == kInt16 &&
- source.storage_type_ == kPlanar && storage_type_ == kPlanar) {
- MixForTypes<float, int16, kPlanar, kPlanar>(source);
- } else if (source.sample_type_ == kFloat32 && sample_type_ == kFloat32 &&
- source.storage_type_ == kInterleaved &&
- storage_type_ == kInterleaved) {
- MixForTypes<float, float, kInterleaved, kInterleaved>(source);
- } else if (source.sample_type_ == kFloat32 && sample_type_ == kFloat32 &&
- source.storage_type_ == kInterleaved && storage_type_ == kPlanar) {
- MixForTypes<float, float, kInterleaved, kPlanar>(source);
- } else if (source.sample_type_ == kFloat32 && sample_type_ == kFloat32 &&
- source.storage_type_ == kPlanar && storage_type_ == kInterleaved) {
- MixForTypes<float, float, kPlanar, kInterleaved>(source);
- } else if (source.sample_type_ == kFloat32 && sample_type_ == kFloat32 &&
- source.storage_type_ == kPlanar && storage_type_ == kPlanar) {
- MixForTypes<float, float, kPlanar, kPlanar>(source);
- } else {
- NOTREACHED();
- }
-}
-
-void ShellAudioBus::Mix(const ShellAudioBus& source,
- const std::vector<float>& matrix) {
- DCHECK_EQ(channels() * source.channels(), matrix.size());
- DCHECK_EQ(sample_type_, kFloat32);
- DCHECK_EQ(source.sample_type_, kFloat32);
- if (channels() * source.channels() != matrix.size() ||
- sample_type_ != kFloat32 || source.sample_type_ != kFloat32) {
- ZeroAllFrames();
- return;
- }
-
- size_t frames = std::min(frames_, source.frames_);
- for (size_t dest_channel = 0; dest_channel < channels_; ++dest_channel) {
- for (size_t frame = 0; frame < frames; ++frame) {
- float mixed_sample = 0.f;
- for (size_t src_channel = 0; src_channel < source.channels_;
- ++src_channel) {
- mixed_sample += source.GetFloat32Sample(src_channel, frame) *
- matrix[dest_channel * source.channels_ + src_channel];
- }
- mixed_sample += GetFloat32Sample(dest_channel, frame);
- SetFloat32Sample(dest_channel, frame, mixed_sample);
- }
- }
-}
-
-uint8* ShellAudioBus::GetSamplePtr(size_t channel, size_t frame) {
- DCHECK_LT(channel, channels_);
- DCHECK_LT(frame, frames_);
-
- if (storage_type_ == kInterleaved) {
- return channel_data_[0] +
- GetSampleSizeInBytes() * (channels_ * frame + channel);
- } else {
- return channel_data_[channel] + GetSampleSizeInBytes() * frame;
- }
-}
-
-const uint8* ShellAudioBus::GetSamplePtr(size_t channel, size_t frame) const {
- DCHECK_LT(channel, channels_);
- DCHECK_LT(frame, frames_);
-
- if (storage_type_ == kInterleaved) {
- return channel_data_[0] +
- GetSampleSizeInBytes() * (channels_ * frame + channel);
- } else {
- return channel_data_[channel] + GetSampleSizeInBytes() * frame;
- }
-}
-
-} // namespace media
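
For reference, the float32-to-int16 branch of ConvertSample() above scales by
32768 and clamps to the int16 range, so 0.5f maps to 16384, 1.0f saturates to
32767, and -1.0f maps to -32768. A standalone sketch of that arithmetic (the
helper name Float32SampleToInt16 is illustrative only):

  #include <algorithm>
  #include <limits>

  inline short Float32SampleToInt16(float sample) {
    // Scale into the int16 range, then clamp to avoid overflow at +1.0f.
    int scaled = static_cast<int>(sample * 32768.f);
    scaled = std::max<int>(scaled, std::numeric_limits<short>::min());
    scaled = std::min<int>(scaled, std::numeric_limits<short>::max());
    return static_cast<short>(scaled);
  }
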
diff --git a/src/media/base/shell_audio_bus.h b/src/media/base/shell_audio_bus.h
deleted file mode 100644
index 5beaf2c..0000000
--- a/src/media/base/shell_audio_bus.h
+++ /dev/null
@@ -1,162 +0,0 @@
-/*
- * Copyright 2015 Google Inc. All Rights Reserved.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef MEDIA_BASE_SHELL_AUDIO_BUS_H_
-#define MEDIA_BASE_SHELL_AUDIO_BUS_H_
-
-#include <vector>
-
-#include "base/logging.h"
-#include "base/memory/aligned_memory.h"
-#include "base/memory/scoped_ptr.h"
-#include "media/base/media_export.h"
-
-namespace media {
-
-// This Swiss Army knife class encapsulates audio data in multiple channels, in
-// different storage types and with different sample sizes. It also provides
-// operations to convert and mix between different types of audio data. It
-// should be used whenever such audio data is stored or passed around.
-// In this class, a "sample" is one audio waveform value at a certain time from
-// a certain channel, while a "frame" refers to all samples at the same time
-// from all channels. For example, for 48000Hz stereo audio with samples in
-// float, the sample size in bytes is 4 but the frame size in bytes is 8. One
-// second of such audio contains 48000 frames (96000 samples).
-// Note: This class doesn't do endianness conversions. It assumes that all data
-// is in the correct endianness.
-class MEDIA_EXPORT ShellAudioBus {
- public:
- // Guaranteed alignment of each channel's data; use 64-byte alignment so it
- // satisfies all our current platforms. Note that this is only used for
- // buffers that are allocated and owned by the ShellAudioBus. We don't
-  // enforce alignment for the buffers passed in, and extra caution should be
-  // taken if they are used as a hardware buffer.
- static const size_t kChannelAlignmentInBytes = 64;
-
- enum SampleType { kInt16, kFloat32 };
-
- enum StorageType { kInterleaved, kPlanar };
-
- ShellAudioBus(size_t channels, size_t frames, SampleType sample_type,
- StorageType storage_type);
- ShellAudioBus(size_t frames, const std::vector<float*>& samples);
- ShellAudioBus(size_t channels, size_t frames, float* samples);
- ShellAudioBus(size_t frames, const std::vector<int16*>& samples);
- ShellAudioBus(size_t channels, size_t frames, int16* samples);
-
- size_t channels() const { return channels_; }
- size_t frames() const { return frames_; }
- SampleType sample_type() const { return sample_type_; }
- StorageType storage_type() const { return storage_type_; }
- size_t GetSampleSizeInBytes() const;
- const uint8* interleaved_data() const;
- const uint8* planar_data(size_t channel) const;
-
- int16 GetInt16Sample(size_t channel, size_t frame) const {
- DCHECK_EQ(sample_type_, kInt16);
- return *reinterpret_cast<const int16*>(GetSamplePtr(channel, frame));
- }
- float GetFloat32Sample(size_t channel, size_t frame) const {
- DCHECK_EQ(sample_type_, kFloat32);
- return *reinterpret_cast<const float*>(GetSamplePtr(channel, frame));
- }
-
- void ZeroFrames(size_t start_frame, size_t end_frame);
- void ZeroAllFrames() { ZeroFrames(0, frames()); }
-
-  // Copies frames from |source| provided that it has the same number of
-  // channels as the destination object (this). This function does any
-  // necessary conversion between different sample types and storage types.
-  // When |source| has fewer frames than the destination object, only those
-  // frames are copied; the remaining frames in our buffer are not zero-filled.
- void Assign(const ShellAudioBus& source);
-
- // The same as the above function except that this function also does mixing.
- // |matrix| is a |dest.channels()| row * |source.channels()| column matrix in
- // row major.
- // dest.sample[dest_channel][frame] =
- // source.sample[0][frame] * matrix[dest_channel * source.channels() + 0]
- // + source.sample[1][frame] * matrix[dest_channel * source.channels() + 1]
- // ...
- // + source.sample[source.channels() - 1][frame] *
-  //         matrix[dest_channel * source.channels() + source.channels() - 1];
-  // Note: Both objects must have a sample type of kFloat32.
- void Assign(const ShellAudioBus& source, const std::vector<float>& matrix);
-
- // The following functions are the same as the Assign() functions except that
- // they add the calculated samples to the target samples instead of replacing
- // the target samples with the calculated samples.
-  // Note: For the version that takes |matrix|, both objects must have a
-  // sample type of kFloat32.
- void Mix(const ShellAudioBus& source);
- void Mix(const ShellAudioBus& source, const std::vector<float>& matrix);
-
- public:
- // The .*ForTypes? functions below are optimized versions that assume what
- // storage type the bus is using. They are meant to be called after
- // checking what storage type the bus is once, and then performing a batch
- // of operations, where it is known that the type will not change.
- template <typename SampleTypeName, StorageType T>
- inline uint8* GetSamplePtrForType(size_t channel, size_t frame) const {
- DCHECK_LT(channel, channels_);
- DCHECK_LT(frame, frames_);
-
- if (T == kInterleaved) {
- return channel_data_[0] +
- sizeof(SampleTypeName) * (channels_ * frame + channel);
- } else if (T == kPlanar) {
- return channel_data_[channel] + sizeof(SampleTypeName) * frame;
- } else {
- NOTREACHED();
- }
-
- return NULL;
- }
-
- template <typename SampleTypeName, StorageType T>
- inline SampleTypeName GetSampleForType(size_t channel, size_t frame) const {
- return *reinterpret_cast<const SampleTypeName*>(
- GetSamplePtrForType<SampleTypeName, T>(channel, frame));
- }
-
- template <typename SourceSampleType,
- typename DestSampleType,
- StorageType SourceStorageType,
- StorageType DestStorageType>
- void MixForTypes(const ShellAudioBus& source);
-
- private:
- void SetFloat32Sample(size_t channel, size_t frame, float sample) {
- DCHECK_EQ(sample_type_, kFloat32);
- *reinterpret_cast<float*>(GetSamplePtr(channel, frame)) = sample;
- }
- uint8* GetSamplePtr(size_t channel, size_t frame);
- const uint8* GetSamplePtr(size_t channel, size_t frame) const;
-
- // Contiguous block of channel memory if the memory is owned by this object.
- scoped_ptr_malloc<uint8, base::ScopedPtrAlignedFree> data_;
-
- std::vector<uint8*> channel_data_;
- size_t channels_;
- size_t frames_;
- SampleType sample_type_;
- StorageType storage_type_;
-
- DISALLOW_COPY_AND_ASSIGN(ShellAudioBus);
-};
-
-} // namespace media
-
-#endif // MEDIA_BASE_SHELL_AUDIO_BUS_H_
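
A small sketch of the frame/sample bookkeeping and of the row-major mix matrix
documented above, downmixing stereo float32 audio to mono. It uses only the
ShellAudioBus API declared in this header; DownmixStereoToMono and the 0.5f
gains are illustrative choices.

  #include <vector>
  #include "media/base/shell_audio_bus.h"

  namespace media {

  // For 48000Hz stereo float32 audio, one second is 48000 frames and 96000
  // samples, and each frame is 8 bytes (2 channels * 4 bytes per sample).
  // |mono| would typically be constructed as
  //   ShellAudioBus(1, stereo.frames(), ShellAudioBus::kFloat32,
  //                 ShellAudioBus::kInterleaved).
  void DownmixStereoToMono(const ShellAudioBus& stereo, ShellAudioBus* mono) {
    // |matrix| has mono->channels() (= 1) rows and stereo.channels() (= 2)
    // columns in row-major order, so the single row {0.5f, 0.5f} yields
    //   mono.sample[0][frame] = 0.5f * stereo.sample[0][frame]
    //                         + 0.5f * stereo.sample[1][frame]
    // Both buses must use kFloat32 samples.
    std::vector<float> matrix;
    matrix.push_back(0.5f);
    matrix.push_back(0.5f);
    mono->Assign(stereo, matrix);
  }

  }  // namespace media
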
diff --git a/src/media/base/shell_audio_bus_test.cc b/src/media/base/shell_audio_bus_test.cc
deleted file mode 100644
index db5240a..0000000
--- a/src/media/base/shell_audio_bus_test.cc
+++ /dev/null
@@ -1,586 +0,0 @@
-/*
- * Copyright 2015 Google Inc. All Rights Reserved.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "media/base/shell_audio_bus.h"
-
-#include <vector>
-
-#include "testing/gtest/include/gtest/gtest.h"
-
-namespace media {
-namespace testing {
-namespace {
-
-template <typename T>
-bool VerifyValues(const T* data, size_t size, T value) {
- for (size_t i = 0; i < size; ++i) {
- if (data[i] != value) {
- return false;
- }
- }
- return true;
-}
-
-template <typename T>
-void FillValues(T* data, size_t size, T value) {
- for (size_t i = 0; i < size; ++i) {
- data[i] = value;
- }
-}
-
-void Scribble(uint8* data, size_t size) {
- for (size_t i = 0; i < size; ++i) {
- data[i] = static_cast<uint8>(i + 0x34);
- }
-}
-
-bool VerifyScribble(const uint8* data, size_t size) {
- for (size_t i = 0; i < size; ++i) {
- if (data[i] != static_cast<uint8>(i + 0x34)) {
- return false;
- }
- }
- return true;
-}
-
-// This function only works with audio buses containing float32 samples.
-bool IsSameChannel(const ShellAudioBus& audio_bus_1,
- size_t channel_1,
- const ShellAudioBus& audio_bus_2,
- size_t channel_2) {
- if (audio_bus_1.frames() != audio_bus_2.frames()) {
- return false;
- }
- for (size_t frame = 0; frame < audio_bus_1.frames(); ++frame) {
- if (fabsf(audio_bus_1.GetFloat32Sample(channel_1, frame) -
- audio_bus_2.GetFloat32Sample(channel_2, frame)) > 0.001) {
- return false;
- }
- }
- return true;
-}
-
-// This class allocates buffers with extra guard bytes at the front and back of
-// each buffer. It can be used to verify whether the ShellAudioBus
-// implementation writes any data out of bounds.
-class GuardedBuffers {
- public:
- GuardedBuffers(size_t number_of_buffers, size_t bytes_per_buffer) {
- std::vector<uint8> buffer(bytes_per_buffer + kGuardBytes * 2);
- buffers_.resize(number_of_buffers, buffer);
- ScribbleContent();
- }
-
- void ScribbleContent() {
- for (size_t i = 0; i < buffers_.size(); ++i) {
- Scribble(&buffers_[i][0], kGuardBytes);
- Scribble(&buffers_[i][buffers_[i].size() - kGuardBytes], kGuardBytes);
- Scribble(&buffers_[i][kGuardBytes], buffers_[i].size() - kGuardBytes * 2);
- }
- }
-
- bool VerifyGuardBytes() const {
- for (size_t i = 0; i < buffers_.size(); ++i) {
- if (!VerifyScribble(&buffers_[i][0], kGuardBytes) ||
- !VerifyScribble(&buffers_[i][buffers_[i].size() - kGuardBytes],
- kGuardBytes)) {
- return false;
- }
- }
- return true;
- }
-
- template <typename T>
- T* GetBuffer(size_t index) {
- return reinterpret_cast<T*>(&buffers_.at(index)[kGuardBytes]);
- }
-
- private:
- typedef std::vector<uint8> Buffer;
- typedef std::vector<Buffer> Buffers;
-
- static const size_t kGuardBytes = 256;
-
- Buffers buffers_;
-};
-
-class ShellAudioBusTest : public ::testing::Test {
- public:
- typedef ShellAudioBus::SampleType SampleType;
- typedef ShellAudioBus::StorageType StorageType;
-
- ~ShellAudioBusTest() {
-    // We do an extra call to VerifyGuardBytes() just in case it was omitted
-    // in a test. It is still recommended to verify the integrity in individual
-    // tests so the failure message is more informative.
- if (guarded_buffers_) {
- EXPECT_TRUE(guarded_buffers_->VerifyGuardBytes());
- }
- }
-
- template <typename T>
- void CreateAudioBus(StorageType storage_type) {
-    // We do an extra call to VerifyGuardBytes() just in case it was omitted
-    // in a test. It is still recommended to verify the integrity in individual
-    // tests so the failure message is more informative.
- if (guarded_buffers_) {
- EXPECT_TRUE(guarded_buffers_->VerifyGuardBytes());
- }
- if (storage_type == ShellAudioBus::kPlanar) {
- guarded_buffers_.reset(
- new GuardedBuffers(kChannels, sizeof(T) * kFrames));
- std::vector<T*> samples;
- for (size_t channel = 0; channel < kChannels; ++channel) {
- samples.push_back(guarded_buffers_->GetBuffer<T>(channel));
- }
- audio_bus_.reset(new ShellAudioBus(kFrames, samples));
- } else {
- guarded_buffers_.reset(
- new GuardedBuffers(1, sizeof(T) * kFrames * kChannels));
- audio_bus_.reset(new ShellAudioBus(kChannels, kFrames,
- guarded_buffers_->GetBuffer<T>(0)));
- }
- }
-
- protected:
- static const size_t kChannels;
- static const size_t kFrames;
-
- scoped_ptr<GuardedBuffers> guarded_buffers_;
- scoped_ptr<ShellAudioBus> audio_bus_;
-};
-
-const size_t ShellAudioBusTest::kChannels = 3;
-const size_t ShellAudioBusTest::kFrames = 809;
-
-TEST_F(ShellAudioBusTest, ConstructorWithAllocation) {
- ShellAudioBus audio_bus(kChannels, kFrames, ShellAudioBus::kFloat32,
- ShellAudioBus::kPlanar);
- EXPECT_EQ(audio_bus.channels(), kChannels);
- EXPECT_EQ(audio_bus.frames(), kFrames);
- for (size_t channel = 0; channel < audio_bus.channels(); ++channel) {
- const uint8* samples = audio_bus.planar_data(channel);
- EXPECT_EQ(reinterpret_cast<intptr_t>(samples) %
- ShellAudioBus::kChannelAlignmentInBytes,
- 0);
- }
-}
-
-TEST_F(ShellAudioBusTest, ConstructorWithoutAllocation) {
- CreateAudioBus<float>(ShellAudioBus::kInterleaved);
- EXPECT_EQ(audio_bus_->channels(), kChannels);
- EXPECT_EQ(audio_bus_->frames(), kFrames);
- const float* samples =
- reinterpret_cast<const float*>(audio_bus_->interleaved_data());
- EXPECT_EQ(samples, guarded_buffers_->GetBuffer<float>(0));
- EXPECT_TRUE(guarded_buffers_->VerifyGuardBytes());
-
- CreateAudioBus<int16>(ShellAudioBus::kPlanar);
- EXPECT_EQ(audio_bus_->channels(), kChannels);
- EXPECT_EQ(audio_bus_->frames(), kFrames);
- for (size_t channel = 0; channel < audio_bus_->channels(); ++channel) {
- const int16* samples =
- reinterpret_cast<const int16*>(audio_bus_->planar_data(channel));
- EXPECT_EQ(samples, guarded_buffers_->GetBuffer<int16>(channel));
- }
- EXPECT_TRUE(guarded_buffers_->VerifyGuardBytes());
-}
-
-TEST_F(ShellAudioBusTest, GetSampleSizeInBytes) {
- ShellAudioBus float_audio_bus(2, 4, ShellAudioBus::kFloat32,
- ShellAudioBus::kPlanar);
- EXPECT_EQ(float_audio_bus.GetSampleSizeInBytes(), sizeof(float));
- ShellAudioBus int16_audio_bus(2, 4, ShellAudioBus::kInt16,
- ShellAudioBus::kPlanar);
- EXPECT_EQ(int16_audio_bus.GetSampleSizeInBytes(), sizeof(int16));
-}
-
-TEST_F(ShellAudioBusTest, GetSample) {
- CreateAudioBus<float>(ShellAudioBus::kInterleaved);
- // Mark the first sample of the first channel as 1.f.
- guarded_buffers_->GetBuffer<float>(0)[0] = 1.f;
- // Mark the last sample of the last channel as 1.f.
- guarded_buffers_->GetBuffer<float>(0)[kFrames * kChannels - 1] = 1.f;
- EXPECT_EQ(audio_bus_->GetFloat32Sample(0, 0), 1.f);
- EXPECT_EQ(audio_bus_->GetFloat32Sample(kChannels - 1, kFrames - 1), 1.f);
- EXPECT_TRUE(guarded_buffers_->VerifyGuardBytes());
-
- CreateAudioBus<int16>(ShellAudioBus::kPlanar);
- for (size_t channel = 0; channel < audio_bus_->channels(); ++channel) {
- // Mark the first sample of the channel as 100.
- guarded_buffers_->GetBuffer<int16>(channel)[0] = 100;
- // Mark the last sample of the channel as 100.
- guarded_buffers_->GetBuffer<int16>(channel)[kFrames - 1] = 100;
- EXPECT_EQ(audio_bus_->GetInt16Sample(channel, 0), 100);
- EXPECT_EQ(audio_bus_->GetInt16Sample(channel, kFrames - 1), 100);
- }
- EXPECT_TRUE(guarded_buffers_->VerifyGuardBytes());
-}
-
-TEST_F(ShellAudioBusTest, ZeroFrames) {
- CreateAudioBus<float>(ShellAudioBus::kInterleaved);
- Scribble(guarded_buffers_->GetBuffer<uint8>(0),
- sizeof(float) * kFrames * kChannels);
- audio_bus_->ZeroFrames(0, 0);
- audio_bus_->ZeroFrames(kFrames, kFrames);
- EXPECT_TRUE(VerifyScribble(guarded_buffers_->GetBuffer<uint8>(0),
- sizeof(float) * kFrames * kChannels));
- EXPECT_TRUE(guarded_buffers_->VerifyGuardBytes());
-
-  // Mark the first sample of the last channel and the second sample of the
-  // first channel as 1.f.
- guarded_buffers_->GetBuffer<float>(0)[kChannels - 1] = 1.f;
- guarded_buffers_->GetBuffer<float>(0)[kChannels] = 1.f;
- audio_bus_->ZeroFrames(0, 1);
- EXPECT_EQ(guarded_buffers_->GetBuffer<float>(0)[kChannels - 1], 0.f);
- EXPECT_EQ(guarded_buffers_->GetBuffer<float>(0)[kChannels], 1.f);
- EXPECT_TRUE(guarded_buffers_->VerifyGuardBytes());
-
-  // Mark the last sample of the first channel and the second-to-last sample
-  // of the last channel as 1.f.
- guarded_buffers_->GetBuffer<float>(0)[kChannels * (kFrames - 1)] = 1.f;
- guarded_buffers_->GetBuffer<float>(0)[kChannels * (kFrames - 1) - 1] = 1.f;
- audio_bus_->ZeroFrames(kFrames - 1, kFrames);
- EXPECT_EQ(guarded_buffers_->GetBuffer<float>(0)[kChannels * (kFrames - 1)],
- 0.f);
- EXPECT_EQ(
- guarded_buffers_->GetBuffer<float>(0)[kChannels * (kFrames - 1) - 1],
- 1.f);
-
- audio_bus_->ZeroFrames(1, kFrames - 1);
- EXPECT_TRUE(VerifyValues(guarded_buffers_->GetBuffer<float>(0),
- kFrames * kChannels, 0.f));
- EXPECT_TRUE(guarded_buffers_->VerifyGuardBytes());
-
- CreateAudioBus<int16>(ShellAudioBus::kPlanar);
- for (size_t channel = 0; channel < audio_bus_->channels(); ++channel) {
- // Mark the first two samples of the channel as 100.
- guarded_buffers_->GetBuffer<int16>(channel)[0] = 100;
- guarded_buffers_->GetBuffer<int16>(channel)[1] = 100;
- audio_bus_->ZeroFrames(0, 1);
- EXPECT_EQ(guarded_buffers_->GetBuffer<int16>(channel)[0], 0);
- EXPECT_EQ(guarded_buffers_->GetBuffer<int16>(channel)[1], 100);
- EXPECT_TRUE(guarded_buffers_->VerifyGuardBytes());
-
- // Mark the last two samples of the channel as 100.
- guarded_buffers_->GetBuffer<int16>(channel)[kFrames - 1] = 100;
- guarded_buffers_->GetBuffer<int16>(channel)[kFrames - 2] = 100;
- audio_bus_->ZeroFrames(kFrames - 1, kFrames);
- EXPECT_EQ(guarded_buffers_->GetBuffer<int16>(channel)[kFrames - 1], 0);
- EXPECT_EQ(guarded_buffers_->GetBuffer<int16>(channel)[kFrames - 2], 100);
- EXPECT_TRUE(guarded_buffers_->VerifyGuardBytes());
-
- audio_bus_->ZeroFrames(1, kFrames - 1);
- EXPECT_TRUE(VerifyValues(guarded_buffers_->GetBuffer<int16>(channel),
- kFrames, static_cast<int16>(0)));
- EXPECT_TRUE(guarded_buffers_->VerifyGuardBytes());
- }
-}
-
-TEST_F(ShellAudioBusTest, AssignWithSameSampleTypeAndStorageType) {
- CreateAudioBus<float>(ShellAudioBus::kInterleaved);
- FillValues(guarded_buffers_->GetBuffer<float>(0), kFrames * kChannels, 1.f);
- {
- ShellAudioBus source(kChannels, kFrames / 2, ShellAudioBus::kFloat32,
- ShellAudioBus::kInterleaved);
- source.ZeroAllFrames();
- audio_bus_->Assign(source);
- EXPECT_TRUE(VerifyValues(guarded_buffers_->GetBuffer<float>(0),
- kFrames / 2 * kChannels, 0.f));
- EXPECT_TRUE(VerifyValues(
- guarded_buffers_->GetBuffer<float>(0) + kFrames / 2 * kChannels,
- (kFrames - kFrames / 2) * kChannels, 1.f));
- EXPECT_TRUE(guarded_buffers_->VerifyGuardBytes());
- }
-
- {
- ShellAudioBus source(kChannels, kFrames, ShellAudioBus::kFloat32,
- ShellAudioBus::kInterleaved);
- source.ZeroAllFrames();
- audio_bus_->Assign(source);
- EXPECT_TRUE(VerifyValues(guarded_buffers_->GetBuffer<float>(0),
- kFrames * kChannels, 0.f));
- EXPECT_TRUE(guarded_buffers_->VerifyGuardBytes());
- }
-
- {
- ShellAudioBus source(kChannels, kFrames * 2, ShellAudioBus::kFloat32,
- ShellAudioBus::kInterleaved);
- source.ZeroAllFrames();
- audio_bus_->Assign(source);
- EXPECT_TRUE(VerifyValues(guarded_buffers_->GetBuffer<float>(0),
- kFrames * kChannels, 0.f));
- EXPECT_TRUE(guarded_buffers_->VerifyGuardBytes());
- }
-
- CreateAudioBus<int16>(ShellAudioBus::kPlanar);
- for (size_t channel = 0; channel < audio_bus_->channels(); ++channel) {
- FillValues(guarded_buffers_->GetBuffer<int16>(channel), kFrames,
- static_cast<int16>(100));
- {
- ShellAudioBus source(kChannels, kFrames / 2, ShellAudioBus::kInt16,
- ShellAudioBus::kPlanar);
- source.ZeroAllFrames();
- audio_bus_->Assign(source);
- EXPECT_TRUE(VerifyValues(guarded_buffers_->GetBuffer<int16>(channel),
- kFrames / 2, static_cast<int16>(0)));
- EXPECT_TRUE(VerifyValues(
- guarded_buffers_->GetBuffer<int16>(channel) + kFrames / 2,
- kFrames - kFrames / 2, static_cast<int16>(100)));
- EXPECT_TRUE(guarded_buffers_->VerifyGuardBytes());
- }
-
- {
- ShellAudioBus source(kChannels, kFrames, ShellAudioBus::kInt16,
- ShellAudioBus::kPlanar);
- source.ZeroAllFrames();
- audio_bus_->Assign(source);
- EXPECT_TRUE(VerifyValues(guarded_buffers_->GetBuffer<int16>(channel),
- kFrames, static_cast<int16>(0)));
- EXPECT_TRUE(guarded_buffers_->VerifyGuardBytes());
- }
-
- {
- ShellAudioBus source(kChannels, kFrames * 2, ShellAudioBus::kInt16,
- ShellAudioBus::kPlanar);
- source.ZeroAllFrames();
- audio_bus_->Assign(source);
- EXPECT_TRUE(VerifyValues(guarded_buffers_->GetBuffer<int16>(channel),
- kFrames, static_cast<int16>(0)));
- EXPECT_TRUE(guarded_buffers_->VerifyGuardBytes());
- }
- }
-}
-
-TEST_F(ShellAudioBusTest, AssignWithDifferentSampleTypesAndStorageTypes) {
- CreateAudioBus<float>(ShellAudioBus::kInterleaved);
- FillValues(guarded_buffers_->GetBuffer<float>(0), kFrames * kChannels, 1.f);
- {
- ShellAudioBus source(kChannels, kFrames / 2, ShellAudioBus::kInt16,
- ShellAudioBus::kPlanar);
- source.ZeroAllFrames();
- audio_bus_->Assign(source);
- EXPECT_TRUE(VerifyValues(guarded_buffers_->GetBuffer<float>(0),
- kFrames / 2 * kChannels, 0.f));
- EXPECT_TRUE(VerifyValues(
- guarded_buffers_->GetBuffer<float>(0) + kFrames / 2 * kChannels,
- (kFrames - kFrames / 2) * kChannels, 1.f));
- EXPECT_TRUE(guarded_buffers_->VerifyGuardBytes());
- }
-
- {
- ShellAudioBus source(kChannels, kFrames, ShellAudioBus::kInt16,
- ShellAudioBus::kPlanar);
- source.ZeroAllFrames();
- audio_bus_->Assign(source);
- EXPECT_TRUE(VerifyValues(guarded_buffers_->GetBuffer<float>(0),
- kFrames * kChannels, 0.f));
- EXPECT_TRUE(guarded_buffers_->VerifyGuardBytes());
- }
-
- {
- ShellAudioBus source(kChannels, kFrames * 2, ShellAudioBus::kInt16,
- ShellAudioBus::kPlanar);
- source.ZeroAllFrames();
- audio_bus_->Assign(source);
- EXPECT_TRUE(VerifyValues(guarded_buffers_->GetBuffer<float>(0),
- kFrames * kChannels, 0.f));
- EXPECT_TRUE(guarded_buffers_->VerifyGuardBytes());
- }
-
- CreateAudioBus<int16>(ShellAudioBus::kPlanar);
- for (size_t channel = 0; channel < audio_bus_->channels(); ++channel) {
- FillValues(guarded_buffers_->GetBuffer<int16>(channel), kFrames,
- static_cast<int16>(100));
- {
- ShellAudioBus source(kChannels, kFrames / 2, ShellAudioBus::kFloat32,
- ShellAudioBus::kInterleaved);
- source.ZeroAllFrames();
- audio_bus_->Assign(source);
- EXPECT_TRUE(VerifyValues(guarded_buffers_->GetBuffer<int16>(channel),
- kFrames / 2, static_cast<int16>(0)));
- EXPECT_TRUE(VerifyValues(
- guarded_buffers_->GetBuffer<int16>(channel) + kFrames / 2,
- kFrames - kFrames / 2, static_cast<int16>(100)));
- EXPECT_TRUE(guarded_buffers_->VerifyGuardBytes());
- }
-
- {
- ShellAudioBus source(kChannels, kFrames, ShellAudioBus::kFloat32,
- ShellAudioBus::kInterleaved);
- source.ZeroAllFrames();
- audio_bus_->Assign(source);
- EXPECT_TRUE(VerifyValues(guarded_buffers_->GetBuffer<int16>(channel),
- kFrames, static_cast<int16>(0)));
- EXPECT_TRUE(guarded_buffers_->VerifyGuardBytes());
- }
-
- {
- ShellAudioBus source(kChannels, kFrames * 2, ShellAudioBus::kFloat32,
- ShellAudioBus::kInterleaved);
- source.ZeroAllFrames();
- audio_bus_->Assign(source);
- EXPECT_TRUE(VerifyValues(guarded_buffers_->GetBuffer<int16>(channel),
- kFrames, static_cast<int16>(0)));
- EXPECT_TRUE(guarded_buffers_->VerifyGuardBytes());
- }
- }
-}
-
-TEST_F(ShellAudioBusTest, AssignWithMatrix) {
- CreateAudioBus<float>(ShellAudioBus::kInterleaved);
- // Fill the 1st channel with increasing positive samples, the 2nd channel
- // with decreasing negative samples, and the 3rd channel with zeros.
- for (size_t channel = 0; channel < kChannels; ++channel) {
- for (size_t frame = 0; frame < kFrames; ++frame) {
- float sample = 0.f;
- if (channel == 0) {
- sample = static_cast<float>(frame) / static_cast<float>(kFrames);
- } else if (channel == 1) {
- sample = -static_cast<float>(frame) / static_cast<float>(kFrames);
- }
- guarded_buffers_->GetBuffer<float>(0)[frame * kChannels + channel] =
- sample;
- }
- }
- {
- ShellAudioBus other(kChannels * 2, kFrames, ShellAudioBus::kFloat32,
- ShellAudioBus::kPlanar);
- const float kConversionMatrix[] = {
- 0.f, 1.f, 0.f, // dest channel 0 = source channel 1.
- 1.f, 0.f, 0.f, // dest channel 1 = source channel 0.
- 1.f, 1.f, 1.f, // dest channel 2 = sum of all source channels,
- // effectively make it equal to source channel 2.
- -1.f, 0.f, 0.f, // dest channel 3 = negative source channel 0,
- // effectively make it equal to source channel 1.
- 0.f, -1.f, 0.f, // dest channel 4 = negative source channel 1,
- // effectively make it equal to source channel 0.
- 2.f, 1.f, -1.f, // dest channel 5 = 2 * source channel 0 + source
- // channel 1 - source channel 2, effectively make it
- // equal to source channel 0.
- };
- std::vector<float> matrix(kConversionMatrix,
- kConversionMatrix + arraysize(kConversionMatrix));
- other.Assign(*audio_bus_, matrix);
-
- EXPECT_TRUE(IsSameChannel(other, 0, *audio_bus_, 1));
- EXPECT_TRUE(IsSameChannel(other, 1, *audio_bus_, 0));
- EXPECT_TRUE(IsSameChannel(other, 2, *audio_bus_, 2));
- EXPECT_TRUE(IsSameChannel(other, 3, *audio_bus_, 1));
- EXPECT_TRUE(IsSameChannel(other, 4, *audio_bus_, 0));
- EXPECT_TRUE(IsSameChannel(other, 5, *audio_bus_, 0));
- EXPECT_TRUE(guarded_buffers_->VerifyGuardBytes());
- }
-}
-
-TEST_F(ShellAudioBusTest, MixWithoutMatrix) {
- CreateAudioBus<float>(ShellAudioBus::kInterleaved);
- // Fill the 1st channel with increasing positive samples, the 2nd channel
- // with decreasing negative samples, and the 3rd channel with zeros.
- for (size_t channel = 0; channel < kChannels; ++channel) {
- for (size_t frame = 0; frame < kFrames; ++frame) {
- float sample = 0.f;
- if (channel == 0) {
- sample = static_cast<float>(frame) / static_cast<float>(kFrames);
- } else if (channel == 1) {
- sample = -static_cast<float>(frame) / static_cast<float>(kFrames);
- }
- guarded_buffers_->GetBuffer<float>(0)[frame * kChannels + channel] =
- sample;
- }
- }
- {
- ShellAudioBus other(kChannels, kFrames, ShellAudioBus::kFloat32,
- ShellAudioBus::kPlanar);
- other.Assign(*audio_bus_);
- // By calling Mix() here, we effectively multiply every sample in |other|
- // by 2.
- other.Mix(*audio_bus_);
- // Adjust the original audio bus by multiplying every sample by 2.
- for (size_t channel = 0; channel < kChannels; ++channel) {
- for (size_t frame = 0; frame < kFrames; ++frame) {
- guarded_buffers_->GetBuffer<float>(0)[frame * kChannels + channel] *= 2;
- }
- }
-
- EXPECT_TRUE(IsSameChannel(other, 0, *audio_bus_, 0));
- EXPECT_TRUE(IsSameChannel(other, 1, *audio_bus_, 1));
- EXPECT_TRUE(IsSameChannel(other, 2, *audio_bus_, 2));
- EXPECT_TRUE(guarded_buffers_->VerifyGuardBytes());
- }
-}
-
-TEST_F(ShellAudioBusTest, MixWithMatrix) {
- CreateAudioBus<float>(ShellAudioBus::kInterleaved);
- // Fill the 1st channel with increasing positive samples, the 2nd channel
- // with decreasing negative samples, and the 3rd channel with zeros.
- for (size_t channel = 0; channel < kChannels; ++channel) {
- for (size_t frame = 0; frame < kFrames; ++frame) {
- float sample = 0.f;
- if (channel == 0) {
- sample = static_cast<float>(frame) / static_cast<float>(kFrames);
- } else if (channel == 1) {
- sample = -static_cast<float>(frame) / static_cast<float>(kFrames);
- }
- guarded_buffers_->GetBuffer<float>(0)[frame * kChannels + channel] =
- sample;
- }
- }
- {
- ShellAudioBus other(kChannels * 2, kFrames, ShellAudioBus::kFloat32,
- ShellAudioBus::kPlanar);
- const float kConversionMatrix[] = {
- 0.f, 1.f, 0.f, // dest channel 0 = source channel 1.
- 1.f, 0.f, 0.f, // dest channel 1 = source channel 0.
- 1.f, 1.f, 1.f, // dest channel 2 = sum of all source channels,
- // effectively make it equal to source channel 2.
- -1.f, 0.f, 0.f, // dest channel 3 = negative source channel 0,
- // effectively make it equal to source channel 1.
- 0.f, -1.f, 0.f, // dest channel 4 = negative source channel 1,
- // effectively make it equal to source channel 0.
- 2.f, 1.f, -1.f, // dest channel 5 = 2 * source channel 0 + source
- // channel 1 - source channel 2, effectively make it
- // equal to source channel 0.
- };
- std::vector<float> matrix(kConversionMatrix,
- kConversionMatrix + arraysize(kConversionMatrix));
- other.Assign(*audio_bus_, matrix);
- // By calling Mix() here, we effectively multiply every sample in |other|
- // by 2.
- other.Mix(*audio_bus_, matrix);
- // Adjust the original audio bus by multiplying every sample by 2.
- for (size_t channel = 0; channel < kChannels; ++channel) {
- for (size_t frame = 0; frame < kFrames; ++frame) {
- guarded_buffers_->GetBuffer<float>(0)[frame * kChannels + channel] *= 2;
- }
- }
-
- EXPECT_TRUE(IsSameChannel(other, 0, *audio_bus_, 1));
- EXPECT_TRUE(IsSameChannel(other, 1, *audio_bus_, 0));
- EXPECT_TRUE(IsSameChannel(other, 2, *audio_bus_, 2));
- EXPECT_TRUE(IsSameChannel(other, 3, *audio_bus_, 1));
- EXPECT_TRUE(IsSameChannel(other, 4, *audio_bus_, 0));
- EXPECT_TRUE(IsSameChannel(other, 5, *audio_bus_, 0));
- EXPECT_TRUE(guarded_buffers_->VerifyGuardBytes());
- }
-}
-
-} // namespace
-} // namespace testing
-} // namespace media
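The matrix-based Assign()/Mix() tests above all rely on the same convention: the conversion matrix is row-major, with one row of source-channel coefficients per destination channel, and each destination sample is the weighted sum of that frame's source samples. A minimal standalone sketch of that mixing rule, using plain std::vector planes rather than ShellAudioBus (the helper name and signature are illustrative only, not part of the original code):

    #include <cstddef>
    #include <vector>

    // Mixes |source| (planar, source.size() channels) into |dest| using a
    // row-major |matrix| of size dest->size() * source.size(), mirroring the
    // convention exercised by the tests above.
    void MixWithMatrix(const std::vector<std::vector<float> >& source,
                       const std::vector<float>& matrix,
                       std::vector<std::vector<float> >* dest) {
      const size_t source_channels = source.size();
      const size_t frames = source.empty() ? 0 : source[0].size();
      for (size_t d = 0; d < dest->size(); ++d) {
        for (size_t frame = 0; frame < frames; ++frame) {
          float sample = 0.f;
          for (size_t s = 0; s < source_channels; ++s) {
            sample += matrix[d * source_channels + s] * source[s][frame];
          }
          // Mix() accumulates into the destination; Assign() would overwrite.
          (*dest)[d][frame] += sample;
        }
      }
    }

With the 6x3 matrix used in the tests, destination channel 5 works out to 2 * ch0 + ch1 - ch2, which equals source channel 0 for the test signal.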
diff --git a/src/media/base/shell_buffer_factory.cc b/src/media/base/shell_buffer_factory.cc
deleted file mode 100644
index dc78fe9..0000000
--- a/src/media/base/shell_buffer_factory.cc
+++ /dev/null
@@ -1,285 +0,0 @@
-/*
- * Copyright 2012 Google Inc. All Rights Reserved.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "media/base/shell_buffer_factory.h"
-
-#include "base/debug/trace_event.h"
-#include "base/logging.h"
-#include "base/stringprintf.h"
-#include "media/base/decrypt_config.h"
-#include "media/base/shell_media_platform.h"
-#include "media/base/shell_media_statistics.h"
-
-namespace media {
-
-// ==== ShellScopedArray =======================================================
-
-ShellScopedArray::ShellScopedArray(uint8* reusable_buffer, size_t size)
- : array_(reusable_buffer), size_(size) {
- if (array_) {
- // Retain a reference to the buffer factory, to ensure that we do not
- // outlive it.
- buffer_factory_ = ShellBufferFactory::Instance();
- }
-}
-
-ShellScopedArray::~ShellScopedArray() {
- TRACE_EVENT0("media_stack", "ShellScopedArray::~ShellScopedArray()");
- if (array_) {
- buffer_factory_->Reclaim(array_);
- }
-}
-
-// ==== ShellBufferFactory =====================================================
-
-scoped_refptr<ShellBufferFactory> ShellBufferFactory::instance_ = NULL;
-
-// static
-void ShellBufferFactory::Initialize() {
- // safe to call multiple times
- if (!instance_) {
- instance_ = new ShellBufferFactory();
- }
-}
-
-bool ShellBufferFactory::AllocateBuffer(size_t size,
- bool is_keyframe,
- AllocCB cb) {
- TRACE_EVENT1("media_stack", "ShellBufferFactory::AllocateBuffer()", "size",
- size);
- // Zero-size buffers are an allocation error; allocate an EOS buffer
- // explicitly with the provided EOS method.
- if (size == 0) {
- TRACE_EVENT0("media_stack",
- "ShellBufferFactory::AllocateBuffer() failed as size is 0.");
- return false;
- }
-
- // If we can allocate a buffer right now, save a pointer to it so that we
- // don't run the callback while holding the memory lock, for safety's sake.
- scoped_refptr<DecoderBuffer> instant_buffer = NULL;
-
- {
- base::AutoLock lock(lock_);
- // We only service requests directly if there are no callbacks pending and
- // we can accommodate a buffer of the requested size.
- if (pending_allocs_.size() == 0) {
- uint8* bytes = Allocate_Locked(size);
- if (bytes) {
- instant_buffer = new DecoderBuffer(bytes, size, is_keyframe);
- TRACE_EVENT0(
- "media_stack",
- "ShellBufferFactory::AllocateBuffer() finished allocation.");
- DCHECK(!instant_buffer->IsEndOfStream());
- }
- }
- if (!instant_buffer) {
- // Alright, we have to wait, enqueue the buffer and size.
- TRACE_EVENT0("media_stack",
- "ShellBufferFactory::AllocateBuffer() deferred.");
- pending_allocs_.push_back(
- std::make_pair(cb, new DecoderBuffer(NULL, size, is_keyframe)));
- }
- }
-
- // If we managed to create a buffer run the callback after releasing the lock.
- if (instant_buffer) {
- cb.Run(instant_buffer);
- }
- return true;
-}
-
-scoped_refptr<DecoderBuffer> ShellBufferFactory::AllocateBufferNow(
- size_t size,
- bool is_keyframe) {
- TRACE_EVENT1("media_stack", "ShellBufferFactory::AllocateBufferNow()", "size",
- size);
- // Zero-size buffers are an allocation error; allocate an EOS buffer
- // explicitly with the provided EOS method.
- if (size == 0) {
- TRACE_EVENT0(
- "media_stack",
- "ShellBufferFactory::AllocateBufferNow() failed as size is 0.");
- return NULL;
- }
-
- base::AutoLock lock(lock_);
- uint8* bytes = Allocate_Locked(size);
- if (!bytes) {
- TRACE_EVENT0(
- "media_stack",
- "ShellBufferFactory::AllocateBufferNow() failed as size is too large.");
- return NULL;
- }
- scoped_refptr<DecoderBuffer> buffer =
- new DecoderBuffer(bytes, size, is_keyframe);
- TRACE_EVENT0("media_stack",
- "ShellBufferFactory::AllocateBufferNow() finished allocation.");
- DCHECK(!buffer->IsEndOfStream());
-
- return buffer;
-}
-
-uint8* ShellBufferFactory::AllocateNow(size_t size) {
- // We skip to the head of the line for these allocations; if there's
- // room, we allocate it.
- base::AutoLock lock(lock_);
- uint8* bytes = Allocate_Locked(size);
-
- if (!bytes) {
- DLOG(ERROR) << base::StringPrintf("Failed to allocate %d bytes!",
- (int)size);
- }
-
- return bytes;
-}
-
-scoped_refptr<ShellScopedArray> ShellBufferFactory::AllocateArray(size_t size) {
- TRACE_EVENT1("media_stack", "ShellBufferFactory::AllocateArray()", "size",
- size);
- uint8* allocated_bytes = NULL;
- if (size == 0) {
- TRACE_EVENT0("media_stack",
- "ShellBufferFactory::AllocateArray() failed as size is 0.");
- return NULL;
- }
-
- if (size <= kShellMaxArraySize) {
- base::AutoLock lock(lock_);
- // there should not already be somebody waiting on an array
- if (array_requested_size_ > 0) {
- TRACE_EVENT0(
- "media_stack",
- "ShellBufferFactory::AllocateArray() failed as another allocation is"
- " in progress.");
- NOTREACHED() << "Max one thread blocking on array allocation at a time.";
- return NULL;
- }
- // Attempt to allocate.
- allocated_bytes = Allocate_Locked(size);
- // If we don't have room save state while we still have the lock
- if (!allocated_bytes) {
- array_requested_size_ = size;
- }
- } else { // oversized requests always fail instantly.
- TRACE_EVENT0(
- "media_stack",
- "ShellBufferFactory::AllocateArray() failed as size is too large.");
- return NULL;
- }
- // Lock is released. Now safe to block this thread if we need to.
- if (!allocated_bytes) {
- TRACE_EVENT0("media_stack",
- "ShellBufferFactory::AllocateArray() deferred.");
- // Wait until enough memory has been released to service this allocation.
- array_allocation_event_.Wait();
- {
- // acquire lock to get address and clear requested size
- base::AutoLock lock(lock_);
- // make sure this allocation makes sense
- DCHECK_EQ(size, array_requested_size_);
- DCHECK(array_allocation_);
- allocated_bytes = array_allocation_;
- array_allocation_ = NULL;
- array_requested_size_ = 0;
- }
- }
- // Whether we blocked or not we should now have a pointer
- DCHECK(allocated_bytes);
- TRACE_EVENT0("media_stack",
- "ShellBufferFactory::AllocateArray() finished allocation.");
- return scoped_refptr<ShellScopedArray>(
- new ShellScopedArray(allocated_bytes, size));
-}
-
-void ShellBufferFactory::Reclaim(uint8* p) {
- TRACE_EVENT0("media_stack", "ShellBufferFactory::Reclaim()");
- typedef std::list<std::pair<AllocCB, scoped_refptr<DecoderBuffer> > >
- FinishList;
- FinishList finished_allocs;
-
- // Reclaim() on a NULL buffer is a no-op, don't even acquire the lock.
- if (p) {
- base::AutoLock lock(lock_);
- ShellMediaPlatform::Instance()->FreeBuffer(p);
-
- // Try to service a blocking array request if there is one, and it hasn't
- // already been serviced. If we can't service it then we won't allocate any
- // additional ShellBuffers as arrays get priority treatment.
- bool service_buffers = true;
- if (array_requested_size_ > 0 && !array_allocation_) {
- array_allocation_ = Allocate_Locked(array_requested_size_);
- if (array_allocation_) {
- // Wake up blocked thread
- array_allocation_event_.Signal();
- } else {
- // Not enough room for the array so don't give away what room we have
- // to the buffers.
- service_buffers = false;
- }
- }
- // Try to process any enqueued allocs in FIFO order until we run out of room
- while (service_buffers && pending_allocs_.size()) {
- size_t size = pending_allocs_.front().second->GetAllocatedSize();
- uint8* bytes = Allocate_Locked(size);
- if (bytes) {
- scoped_refptr<DecoderBuffer> alloc_buff =
- pending_allocs_.front().second;
- alloc_buff->SetBuffer(bytes);
- TRACE_EVENT1("media_stack",
- "ShellBufferFactory::Reclaim() finished allocation.",
- "size", size);
- finished_allocs.push_back(
- std::make_pair(pending_allocs_.front().first, alloc_buff));
- pending_allocs_.pop_front();
- } else {
- service_buffers = false;
- }
- }
- }
- // OK, lock released, do callbacks for finished allocs
- for (FinishList::iterator it = finished_allocs.begin();
- it != finished_allocs.end(); ++it) {
- it->first.Run(it->second);
- }
-}
-
-uint8* ShellBufferFactory::Allocate_Locked(size_t size) {
- // should have acquired the lock already
- lock_.AssertAcquired();
- UPDATE_MEDIA_STATISTICS(STAT_TYPE_ALLOCATED_SHELL_BUFFER_SIZE, size);
- return static_cast<uint8*>(
- ShellMediaPlatform::Instance()->AllocateBuffer(size));
-}
-
-// static
-void ShellBufferFactory::Terminate() {
- instance_ = NULL;
-}
-
-ShellBufferFactory::ShellBufferFactory()
- : array_allocation_event_(false, false),
- array_requested_size_(0),
- array_allocation_(NULL) {}
-
-// Will be called when all ShellBuffers have been deleted AND instance_ has
-// been set to NULL.
-ShellBufferFactory::~ShellBufferFactory() {
- // and no outstanding array requests
- DCHECK_EQ(array_requested_size_, 0);
-}
-
-} // namespace media
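AllocateBuffer() and Reclaim() above share one pattern worth calling out: all allocation decisions are made while holding |lock_|, but client callbacks are collected and only invoked after the lock is released. A stripped-down sketch of that pattern, using std primitives and a simple byte budget in place of the real ShellMediaPlatform pool (the class and member names here are illustrative, not part of the original code):

    #include <cstddef>
    #include <cstdint>
    #include <functional>
    #include <list>
    #include <mutex>
    #include <utility>
    #include <vector>

    class DeferredAllocator {
     public:
      typedef std::function<void(uint8_t*)> AllocCB;

      explicit DeferredAllocator(size_t budget) : free_bytes_(budget) {}

      // Allocates |size| bytes immediately if possible, otherwise queues the
      // request to be serviced by a later Reclaim().
      void Allocate(size_t size, AllocCB cb) {
        uint8_t* bytes = NULL;
        {
          std::lock_guard<std::mutex> lock(mutex_);
          if (pending_.empty() && size <= free_bytes_) {
            free_bytes_ -= size;
            bytes = new uint8_t[size];
          } else {
            pending_.push_back(std::make_pair(size, cb));
          }
        }
        if (bytes) cb(bytes);  // Never run client code while holding the lock.
      }

      // Returns |size| bytes to the pool, then services queued requests in
      // strict FIFO order until the next request no longer fits.
      void Reclaim(uint8_t* p, size_t size) {
        std::vector<std::pair<AllocCB, uint8_t*> > finished;
        {
          std::lock_guard<std::mutex> lock(mutex_);
          delete[] p;
          free_bytes_ += size;
          while (!pending_.empty() && pending_.front().first <= free_bytes_) {
            const size_t request = pending_.front().first;
            free_bytes_ -= request;
            finished.push_back(
                std::make_pair(pending_.front().second, new uint8_t[request]));
            pending_.pop_front();
          }
        }
        for (size_t i = 0; i < finished.size(); ++i)
          finished[i].first(finished[i].second);  // Callbacks run lock-free.
      }

     private:
      std::mutex mutex_;
      size_t free_bytes_;
      std::list<std::pair<size_t, AllocCB> > pending_;
    };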
diff --git a/src/media/base/shell_buffer_factory.h b/src/media/base/shell_buffer_factory.h
deleted file mode 100644
index 8a18156..0000000
--- a/src/media/base/shell_buffer_factory.h
+++ /dev/null
@@ -1,126 +0,0 @@
-/*
- * Copyright 2012 Google Inc. All Rights Reserved.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef MEDIA_BASE_SHELL_BUFFER_FACTORY_H_
-#define MEDIA_BASE_SHELL_BUFFER_FACTORY_H_
-
-#include <list>
-#include <map>
-
-#include "base/callback.h"
-#include "base/memory/ref_counted.h"
-#include "base/synchronization/lock.h"
-#include "base/synchronization/waitable_event.h"
-#include "media/base/decoder_buffer.h"
-#include "media/base/media_export.h"
-
-namespace media {
-
-static const size_t kShellMaxArraySize = 1024 * 1024;
-
-class DecoderBuffer;
-class DecryptConfig;
-class ShellBufferFactory;
-
-// A simple scoped array class designed to re-use the memory allocated by
-// ShellBufferFactory. If needed would be trivial to make generic.
-class MEDIA_EXPORT ShellScopedArray
- : public base::RefCountedThreadSafe<ShellScopedArray> {
- public:
- uint8* Get() { return array_; }
- size_t Size() { return size_; }
-
- private:
- friend class base::RefCountedThreadSafe<ShellScopedArray>;
- friend class ShellBufferFactory;
- // Should only be called by ShellBufferFactory; consumers should use
- // ShellBufferFactory::AllocateArray to allocate a ShellScopedArray.
- ShellScopedArray(uint8* reusable_buffer, size_t size);
- virtual ~ShellScopedArray();
- uint8* array_;
- size_t size_;
- scoped_refptr<ShellBufferFactory> buffer_factory_;
-
- DISALLOW_IMPLICIT_CONSTRUCTORS(ShellScopedArray);
-};
-
-// Singleton instance class for the management and recycling of media-related
-// buffers. It is assumed that the usage pattern of these buffers is that they
-// are used for only a short amount of time, and their allocation is roughly
-// cyclical. All buffers are allocated using the alignment constants defined
-// above, and all buffers are allocated from the shared pool of memory of
-// size defined above.
-class MEDIA_EXPORT ShellBufferFactory
- : public base::RefCountedThreadSafe<ShellBufferFactory> {
- public:
- static void Initialize();
- static inline scoped_refptr<ShellBufferFactory> Instance() {
- return instance_;
- }
-
- typedef base::Callback<void(scoped_refptr<DecoderBuffer>)> AllocCB;
- // Returns false if the allocator will never be able to allocate a buffer
- // of the requested size. Note that if memory is currently available this
- // function will call the callback provided _before_ returning true.
- bool AllocateBuffer(size_t size, bool is_keyframe, AllocCB cb);
- // This function tries to allocate a DecoderBuffer immediately. It returns
- // NULL on failure.
- scoped_refptr<DecoderBuffer> AllocateBufferNow(size_t size, bool is_keyframe);
- // Returns a newly allocated byte field if there's room for it, or NULL if
- // there isn't. Note that this raw allocation method provides no guarantee
- // that ShellBufferFactory will still exist when the memory is to be freed.
- // If that is important please retain a reference to the buffer factory
- // (using Instance()) until the memory is to be reclaimed.
- uint8* AllocateNow(size_t size);
- // BLOCKS THE CALLING THREAD until an array of size is available and can be
- // allocated. We only allow one thread to block on an array allocation at a
- // time; all subsequent calls to AllocateArray on other threads will assert
- // and return NULL.
- scoped_refptr<ShellScopedArray> AllocateArray(size_t size);
-
- // Only called by DecoderBuffer and ShellScopedArray; informs the factory
- // that these objects have gone out of scope and we can reclaim the memory.
- void Reclaim(uint8* p);
-
- static void Terminate();
-
- private:
- friend class base::RefCountedThreadSafe<ShellBufferFactory>;
- ShellBufferFactory();
- ~ShellBufferFactory();
- uint8* Allocate_Locked(size_t aligned_size);
-
- static scoped_refptr<ShellBufferFactory> instance_;
-
- // protects all following members.
- base::Lock lock_;
-
- // queue of pending buffer allocation requests and their sizes
- typedef std::list<std::pair<AllocCB, scoped_refptr<DecoderBuffer> > >
- AllocList;
- AllocList pending_allocs_;
-
- // event used for blocking calls for array allocation
- base::WaitableEvent array_allocation_event_;
- // set to 0 when no thread is blocking on an array allocation
- size_t array_requested_size_;
- // set to an allocation address when allocation has succeeded
- uint8* array_allocation_;
-};
-
-} // namespace media
-
-#endif // MEDIA_BASE_SHELL_BUFFER_FACTORY_H_
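A hedged usage sketch for the callback-based API above. The important subtlety, also noted in the comment on AllocateBuffer(), is that the callback may run before AllocateBuffer() returns when memory is available immediately, so callers must tolerate re-entrant delivery. The consumer class below is hypothetical:

    #include "base/bind.h"
    #include "media/base/shell_buffer_factory.h"

    namespace media {

    // Hypothetical consumer that requests a buffer and handles both immediate
    // and deferred delivery through the same callback.
    class BufferConsumer {
     public:
      void RequestBuffer(size_t size, bool is_keyframe) {
        // The callback may run before AllocateBuffer() returns (memory was
        // available right away) or later, from Reclaim(), once memory frees up.
        bool request_accepted = ShellBufferFactory::Instance()->AllocateBuffer(
            size, is_keyframe,
            base::Bind(&BufferConsumer::OnBufferAllocated,
                       base::Unretained(this)));
        if (!request_accepted) {
          // A zero-sized (or otherwise unsatisfiable) request; handle the error.
        }
      }

     private:
      void OnBufferAllocated(scoped_refptr<DecoderBuffer> buffer) {
        // Fill |buffer| with demuxed data and hand it on to the pipeline.
      }
    };

    }  // namespace media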
diff --git a/src/media/base/shell_cached_decoder_buffer.cc b/src/media/base/shell_cached_decoder_buffer.cc
deleted file mode 100644
index 28873ba..0000000
--- a/src/media/base/shell_cached_decoder_buffer.cc
+++ /dev/null
@@ -1,51 +0,0 @@
-/*
- * Copyright 2015 Google Inc. All Rights Reserved.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "media/base/shell_cached_decoder_buffer.h"
-
-namespace media {
-
-ShellCachedDecoderBuffer::ShellCachedDecoderBuffer(
- const scoped_refptr<media::DecoderBuffer>& source_buffer,
- void* destination,
- FreeCB free_cb)
- : media::DecoderBuffer(NULL, 0, source_buffer->IsKeyframe()),
- source_buffer_(source_buffer),
- free_cb_(free_cb) {
- DCHECK(source_buffer);
- DCHECK(destination);
- DCHECK(!free_cb.is_null());
- DCHECK(!source_buffer->IsEndOfStream());
-
- SetTimestamp(source_buffer->GetTimestamp());
- SetDuration(source_buffer->GetDuration());
-
- buffer_ = static_cast<uint8*>(destination);
- memcpy(buffer_, source_buffer_->GetData(), source_buffer_->GetDataSize());
- size_ = source_buffer_->GetDataSize();
-
- // The buffer is not expandable.
- allocated_size_ = source_buffer_->GetDataSize();
- is_decrypted_ = source_buffer_->IsAlreadyDecrypted();
-}
-
-ShellCachedDecoderBuffer::~ShellCachedDecoderBuffer() {
- free_cb_.Run(buffer_);
- // Set the buffer_ to NULL to stop the base class dtor from freeing it.
- buffer_ = NULL;
-}
-
-} // namespace media
diff --git a/src/media/base/shell_cached_decoder_buffer.h b/src/media/base/shell_cached_decoder_buffer.h
deleted file mode 100644
index db3baad..0000000
--- a/src/media/base/shell_cached_decoder_buffer.h
+++ /dev/null
@@ -1,55 +0,0 @@
-/*
- * Copyright 2015 Google Inc. All Rights Reserved.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef MEDIA_SHELL_CACHED_DECODER_BUFFER_H_
-#define MEDIA_SHELL_CACHED_DECODER_BUFFER_H_
-
-#include "base/callback.h"
-#include "base/logging.h"
-#include "base/memory/ref_counted.h"
-#include "media/base/decoder_buffer.h"
-
-namespace media {
-
-// This class caches the content of |source_buffer| into main memory. This
-// makes it possible to store DecoderBuffers in memory that cannot be accessed
-// by the decoder and to copy them into main memory just before they are
-// decoded.
-class ShellCachedDecoderBuffer : public media::DecoderBuffer {
- public:
- // Callback to free memory passed in.
- typedef base::Callback<void(void*)> FreeCB;
-
- ShellCachedDecoderBuffer(
- const scoped_refptr<media::DecoderBuffer>& source_buffer,
- void* destination,
- FreeCB free_cb);
- ~ShellCachedDecoderBuffer();
-
- const media::DecryptConfig* GetDecryptConfig() const OVERRIDE {
- return source_buffer_->GetDecryptConfig();
- }
- void SetDecryptConfig(scoped_ptr<media::DecryptConfig>) OVERRIDE {
- NOTREACHED();
- }
-
- private:
- scoped_refptr<media::DecoderBuffer> source_buffer_;
- FreeCB free_cb_;
-};
-
-} // namespace media
-
-#endif // MEDIA_SHELL_CACHED_DECODER_BUFFER_H_
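A hedged sketch of the intended use of the class above: just before decode, copy a buffer that lives in memory the decoder cannot read into main memory, and let the wrapper release that memory when it is destroyed. The helper and the allocator function pointers are hypothetical stand-ins for a platform allocator:

    #include "base/bind.h"
    #include "media/base/shell_cached_decoder_buffer.h"

    namespace media {

    // Hypothetical glue code: |allocate| and |free_fn| stand in for a platform
    // main-memory allocator.
    scoped_refptr<DecoderBuffer> MakeDecodable(
        const scoped_refptr<DecoderBuffer>& source,
        void* (*allocate)(size_t),
        void (*free_fn)(void*)) {
      void* destination = allocate(source->GetDataSize());
      // The wrapper copies the payload into |destination| in its constructor,
      // keeps |source| alive via a reference, and frees |destination| through
      // the callback when it is destroyed.
      return new ShellCachedDecoderBuffer(source, destination,
                                          base::Bind(free_fn));
    }

    }  // namespace media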
diff --git a/src/media/base/shell_data_source_reader.cc b/src/media/base/shell_data_source_reader.cc
deleted file mode 100644
index 00e65f1..0000000
--- a/src/media/base/shell_data_source_reader.cc
+++ /dev/null
@@ -1,122 +0,0 @@
-/*
- * Copyright 2012 Google Inc. All Rights Reserved.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "media/base/shell_data_source_reader.h"
-
-#include <limits.h> // for ULLONG_MAX
-
-namespace media {
-
-const int ShellDataSourceReader::kReadError = DataSource::kReadError;
-
-ShellDataSourceReader::ShellDataSourceReader()
- : data_source_(NULL),
- blocking_read_event_(false, false),
- file_size_(-1),
- read_has_failed_(false),
- last_bytes_read_(0) {}
-
-ShellDataSourceReader::~ShellDataSourceReader() {}
-
-void ShellDataSourceReader::SetDataSource(DataSource* data_source) {
- DCHECK(data_source);
- data_source_ = data_source;
-}
-
-// currently only single-threaded reads supported
-int ShellDataSourceReader::BlockingRead(int64 position, int size, uint8* data) {
- // read failures are unrecoverable, all subsequent reads will also fail
- if (read_has_failed_) {
- return kReadError;
- }
-
- // check bounds of read at or past EOF
- if (file_size_ >= 0 && position >= file_size_) {
- return 0;
- }
-
- int total_bytes_read = 0;
- while (size > 0 && !read_has_failed_) {
- {
- base::AutoLock auto_lock(lock_);
- if (!data_source_) {
- break;
- }
- data_source_->Read(
- position, size, data,
- base::Bind(&ShellDataSourceReader::BlockingReadCompleted, this));
- }
-
- // wait for callback on read completion
- blocking_read_event_.Wait();
-
- if (last_bytes_read_ == DataSource::kReadError) {
- // make all future reads fail
- read_has_failed_ = true;
- return kReadError;
- }
-
- DCHECK_LE(last_bytes_read_, size);
- if (last_bytes_read_ > size) {
- // make all future reads fail
- read_has_failed_ = true;
- return kReadError;
- }
-
- // Avoid entering an endless loop here.
- if (last_bytes_read_ == 0)
- break;
-
- total_bytes_read += last_bytes_read_;
- position += last_bytes_read_;
- size -= last_bytes_read_;
- data += last_bytes_read_;
- }
-
- if (read_has_failed_) {
- return kReadError;
- }
- return total_bytes_read;
-}
-
-void ShellDataSourceReader::Stop(const base::Closure& callback) {
- if (data_source_) {
- // stop the data source, it can call the callback
- data_source_->Stop();
-
- base::AutoLock auto_lock(lock_);
- data_source_ = NULL;
- }
- callback.Run();
-}
-
-void ShellDataSourceReader::BlockingReadCompleted(int bytes_read) {
- last_bytes_read_ = bytes_read;
- // wake up blocked thread
- blocking_read_event_.Signal();
-}
-
-int64 ShellDataSourceReader::FileSize() {
- if (file_size_ == -1) {
- base::AutoLock auto_lock(lock_);
- if (data_source_ && !data_source_->GetSize(&file_size_)) {
- file_size_ = -1;
- }
- }
- return file_size_;
-}
-
-} // namespace media
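BlockingRead() above adapts DataSource's asynchronous, callback-based Read() into a synchronous call: issue the read, block on a waitable event, and let the completion callback store the byte count and signal the event. A standalone sketch of that adapter pattern with std primitives (the AsyncSource type is a made-up stand-in, and for simplicity it completes synchronously):

    #include <condition_variable>
    #include <functional>
    #include <mutex>

    // Stand-in for a source whose Read() reports completion via a callback. A
    // real source would invoke |done| later, possibly from another thread.
    struct AsyncSource {
      void Read(int size, std::function<void(int)> done) { done(size); }
    };

    // Mirrors the WaitableEvent-based flow of ShellDataSourceReader::BlockingRead.
    int BlockingRead(AsyncSource* source, int size) {
      std::mutex mutex;
      std::condition_variable signal;
      bool completed = false;
      int bytes_read = 0;

      source->Read(size, [&](int result) {
        std::lock_guard<std::mutex> lock(mutex);
        bytes_read = result;  // Store the result for the blocked caller...
        completed = true;
        signal.notify_one();  // ...and wake it up.
      });

      std::unique_lock<std::mutex> lock(mutex);
      signal.wait(lock, [&] { return completed; });
      return bytes_read;
    }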
diff --git a/src/media/base/shell_data_source_reader.h b/src/media/base/shell_data_source_reader.h
deleted file mode 100644
index 9e50be7..0000000
--- a/src/media/base/shell_data_source_reader.h
+++ /dev/null
@@ -1,73 +0,0 @@
-/*
- * Copyright 2012 Google Inc. All Rights Reserved.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef MEDIA_BASE_SHELL_DATA_SOURCE_READER_H_
-#define MEDIA_BASE_SHELL_DATA_SOURCE_READER_H_
-
-#include "base/bind.h"
-#include "base/callback.h"
-#include "base/message_loop.h"
-#include "base/memory/ref_counted.h"
-#include "base/memory/scoped_ptr.h"
-#include "base/synchronization/lock.h"
-#include "base/synchronization/waitable_event.h"
-#include "media/base/data_source.h"
-
-namespace media {
-
-// Allows sharing of a DataSource object between multiple objects on a single
-// thread, and exposes a simple BlockingRead() method to block the thread until
-// data is available or an error occurs. To avoid circular smart pointer
-// references, this object is also the sole owner of a pointer to DataSource.
-// If we want to add asynchronous reading to this object, it will need its
-// own thread and a callback queue.
-class ShellDataSourceReader
- : public base::RefCountedThreadSafe<ShellDataSourceReader> {
- public:
- static const int kReadError;
-
- ShellDataSourceReader();
- virtual void SetDataSource(DataSource* data_source);
-
- // Blocks the calling thread until the read is complete.
- // Returns the number of bytes read, or kReadError on error.
- // Currently only single-threaded use is supported.
- virtual int BlockingRead(int64 position, int size, uint8* data);
-
- // Returns the size of the file in bytes, or -1 if it is not known. On error,
- // it will retry getting the file size on subsequent calls to FileSize().
- virtual int64 FileSize();
-
- // abort any pending read, then stop the data source
- virtual void Stop(const base::Closure& callback);
-
- protected:
- friend class base::RefCountedThreadSafe<ShellDataSourceReader>;
- virtual ~ShellDataSourceReader();
- // blocking read callback
- virtual void BlockingReadCompleted(int bytes_read);
-
- base::Lock lock_;
- DataSource* data_source_;
- base::WaitableEvent blocking_read_event_;
- int64 file_size_;
- bool read_has_failed_;
- int last_bytes_read_; // protected implicitly by blocking_read_event_
-};
-
-} // namespace media
-
-#endif // MEDIA_BASE_SHELL_DATA_SOURCE_READER_H_
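A hedged usage sketch for the reader interface above, showing how the return value of BlockingRead() is meant to be interpreted (0 means end of stream, kReadError is sticky and unrecoverable). The helper function is hypothetical:

    #include <vector>

    #include "media/base/shell_data_source_reader.h"

    namespace media {

    // Reads up to |size| bytes starting at |position| into |out|. Returns false
    // on a read error; a short or empty result simply means end of stream.
    bool ReadRange(const scoped_refptr<ShellDataSourceReader>& reader,
                   int64 position, int size, std::vector<uint8>* out) {
      if (size <= 0)
        return false;
      out->resize(size);
      int bytes_read = reader->BlockingRead(position, size, &(*out)[0]);
      if (bytes_read == ShellDataSourceReader::kReadError)
        return false;  // All subsequent reads will fail as well.
      out->resize(bytes_read);
      return true;
    }

    }  // namespace media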
diff --git a/src/media/base/shell_filter_graph_log_constants.h b/src/media/base/shell_filter_graph_log_constants.h
deleted file mode 100644
index 2faa11d..0000000
--- a/src/media/base/shell_filter_graph_log_constants.h
+++ /dev/null
@@ -1,97 +0,0 @@
-/*
- * Copyright 2012 Google Inc. All Rights Reserved.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef MEDIA_BASE_SHELL_FILTER_GRAPH_LOG_CONSTANTS_H_
-#define MEDIA_BASE_SHELL_FILTER_GRAPH_LOG_CONSTANTS_H_
-
-namespace media {
-
-// 4 bytes for object type and 4 bytes for signal type following mp4-like
-// binary conventions of packed UTF-8 characters in machine-endian quads.
-
-// This file is also parsed by the Python-based log pretty-printer tool.
-// To support easy Python parsing, all values should be in hex, and all
-// names should be prefixed with one of ObjectId, Event, or State.
-
-static const uint32 kObjectIdBufferFactory = 0x62756672; // 'bufr'
-static const uint32 kObjectIdDemuxer = 0x646d7578; // 'dmux'
-static const uint32 kObjectIdAudioDemuxerStream = 0x75617364; // 'dsau'
-static const uint32 kObjectIdVideoDemuxerStream = 0x69767364; // 'dsvi'
-static const uint32 kObjectIdAudioDecoder = 0x61646563; // 'adec'
-static const uint32 kObjectIdAudioRenderer = 0x61726e64; // 'arnd'
-static const uint32 kObjectIdAudioSink = 0x6b6e6973; // 'sink'
-static const uint32 kObjectIdVideoDecoder = 0x76646563; // 'vdec'
-static const uint32 kObjectIdVideoRenderer = 0x76726e64; // 'vrnd'
-static const uint32 kObjectIdGraphics = 0x67726166; // 'graf'
-
-static const uint32 kEventArrayAllocationError = 0x61796572; // 'ayer'
-static const uint32 kEventArrayAllocationDeferred = 0x61797774; // 'aywt'
-static const uint32 kEventArrayAllocationReclaim = 0x61797263; // 'ayrc'
-static const uint32 kEventArrayAllocationRequest = 0x61797270; // 'ayrq'
-static const uint32 kEventArrayAllocationSuccess = 0x61796f6b; // 'ayok'
-static const uint32 kEventAudioClock = 0x61636c6b; // 'aclk'
-static const uint32 kEventBufferAllocationError = 0x62666572; // 'bfer'
-static const uint32 kEventBufferAllocationDeferred = 0x62667774; // 'bfwt'
-static const uint32 kEventBufferAllocationReclaim = 0x62667263; // 'bfrc'
-static const uint32 kEventBufferAllocationRequest = 0x62667270; // 'bfrq'
-static const uint32 kEventBufferAllocationSuccess = 0x62666f6b; // 'bfok'
-static const uint32 kEventConstructor = 0x63746f72; // 'ctor'
-static const uint32 kEventDataDecoded = 0x64617461; // 'data'
-static const uint32 kEventDecode = 0x64636f64; // 'dcod'
-static const uint32 kEventDownloadAudio = 0x6c646175; // 'ldau'
-static const uint32 kEventDownloadVideo = 0x6c647664; // 'ldvd'
-static const uint32 kEventDropFrame = 0x64726f70; // 'drop'
-static const uint32 kEventEndOfStreamReceived = 0x656f7372; // 'eosr'
-static const uint32 kEventEndOfStreamSent = 0x656f7373; // 'eoss'
-static const uint32 kEventEnqueue = 0x6e717565; // 'nque'
-static const uint32 kEventFatalError = 0x65726f72; // 'eror'
-static const uint32 kEventFlush = 0x666c7368; // 'flsh'
-static const uint32 kEventFrameComposite = 0x64726177; // 'draw'
-static const uint32 kEventFrameFlip = 0x666c6970; // 'flip'
-static const uint32 kEventFreeInputBuffer = 0x6672696e; // 'frin'
-static const uint32 kEventFreeOutputBuffer = 0x66726f75; // 'frou'
-static const uint32 kEventInitialize = 0x696e6974; // 'init'
-static const uint32 kEventOutputBufferFull = 0x66756c6c; // 'full'
-static const uint32 kEventPause = 0x70617573; // 'paus'
-static const uint32 kEventPlay = 0x706c6179; // 'play'
-static const uint32 kEventPop = 0x706f7020; // 'pop '
-static const uint32 kEventPreroll = 0x70726f6c; // 'prol'
-static const uint32 kEventPush = 0x70757368; // 'push'
-static const uint32 kEventRead = 0x72656164; // 'read'
-static const uint32 kEventRender = 0x726e6472; // 'rndr'
-static const uint32 kEventRequestAudio = 0x72657161; // 'reqa'
-static const uint32 kEventRequestInterrupt = 0x69726570; // 'ireq'
-static const uint32 kEventRequestVideo = 0x72657176; // 'reqv'
-static const uint32 kEventReset = 0x72736574; // 'rset'
-static const uint32 kEventResume = 0x7273756d; // 'rsum'
-static const uint32 kEventSeek = 0x7365656b; // 'seek'
-static const uint32 kEventStart = 0x73747274; // 'strt'
-static const uint32 kEventStop = 0x73746f70; // 'stop'
-static const uint32 kEventTimeCallback = 0x74696d65; // 'time'
-static const uint32 kEventUnderflow = 0x75666c77; // 'uflw'
-static const uint32 kEventViewHostComposite = 0x76686365; // 'vhce'
-static const uint32 kEventWebKitComposite = 0x776b6365; // 'wkce'
-
-// Instead of a timestamp, the following state flags log individual pipeline
-// state information.
-// Two uint32s: buffer queue size and read callback queue size.
-static const uint32 kStateDemuxerStreamQueues = 0x73657571; // 'ques'
-// One uint32, either zero or one depending on state, and a zero.
-static const uint32 kStateDemuxerStreamBuffering = 0x66667562; // 'buff'
-
-} // namespace media
-
-#endif // MEDIA_BASE_SHELL_FILTER_GRAPH_LOG_CONSTANTS_H_
diff --git a/src/media/base/shell_media_platform.cc b/src/media/base/shell_media_platform.cc
deleted file mode 100644
index 3d33287..0000000
--- a/src/media/base/shell_media_platform.cc
+++ /dev/null
@@ -1,37 +0,0 @@
-/*
- * Copyright 2012 Google Inc. All Rights Reserved.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "media/base/shell_media_platform.h"
-
-namespace {
-
-media::ShellMediaPlatform* s_shell_media_platform_;
-
-} // namespace
-
-namespace media {
-
-// static
-ShellMediaPlatform* ShellMediaPlatform::Instance() {
- return s_shell_media_platform_;
-}
-
-// static
-void ShellMediaPlatform::SetInstance(ShellMediaPlatform* shell_media_platform) {
- s_shell_media_platform_ = shell_media_platform;
-}
-
-} // namespace media
diff --git a/src/media/base/shell_media_platform.h b/src/media/base/shell_media_platform.h
deleted file mode 100644
index 86f2d79..0000000
--- a/src/media/base/shell_media_platform.h
+++ /dev/null
@@ -1,139 +0,0 @@
-/*
- * Copyright 2012 Google Inc. All Rights Reserved.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef MEDIA_BASE_SHELL_MEDIA_PLATFORM_H_
-#define MEDIA_BASE_SHELL_MEDIA_PLATFORM_H_
-
-#include "base/basictypes.h"
-#include "base/logging.h"
-#include "base/memory/ref_counted.h"
-#include "media/base/decoder_buffer.h"
-#include "media/base/limits.h"
-#include "media/base/media_export.h"
-#include "media/base/shell_buffer_factory.h"
-#include "media/base/shell_video_data_allocator.h"
-#include "media/base/shell_video_frame_provider.h"
-#include "starboard/decode_target.h"
-
-namespace cobalt {
-namespace render_tree {
-class ResourceProvider;
-} // namespace render_tree
-} // namespace cobalt
-
-namespace media {
-
-// This class is meant to be the single point to attach platform-specific media
-// classes and settings. Each platform should provide its own implementation
-// and expose it through ShellMediaPlatform::Instance().
-class MEDIA_EXPORT ShellMediaPlatform {
- public:
- ShellMediaPlatform() {}
- virtual ~ShellMediaPlatform() {}
-
- // Individual platforms should implement the following two functions to
- // ensure that a valid ShellMediaPlatform instance is available during the
- // lifetime of the media stack.
- static void Initialize();
- static void Terminate();
-
- static ShellMediaPlatform* Instance();
-
- // The following functions will be called when the application enters or
- // leaves suspending status.
- virtual void Suspend() {}
- virtual void Resume(
- cobalt::render_tree::ResourceProvider* /*resource_provider*/) {}
-
- // Media stack buffer allocate/free functions currently only used by
- // ShellBufferFactory.
- virtual void* AllocateBuffer(size_t size) = 0;
- virtual void FreeBuffer(void* ptr) = 0;
-
- // The maximum audio and video buffer size used by SourceBufferStream.
- // See implementation of SourceBufferStream for more details.
- virtual size_t GetSourceBufferStreamAudioMemoryLimit() const = 0;
- virtual size_t GetSourceBufferStreamVideoMemoryLimit() const = 0;
-
- virtual scoped_refptr<ShellVideoFrameProvider> GetVideoFrameProvider() {
- return NULL;
- }
-
-#if SB_API_VERSION >= 4
- virtual SbDecodeTargetGraphicsContextProvider*
- GetSbDecodeTargetGraphicsContextProvider() { return NULL; }
-#elif SB_API_VERSION >= 3
- virtual SbDecodeTargetProvider* GetSbDecodeTargetProvider() { return NULL; }
-#endif // SB_API_VERSION >= 4
-
- // Total number of video frames that populate the pipeline when prerolling.
- // Increasing this increases the start delay.
- virtual int GetMaxVideoPrerollFrames() const {
- return limits::kMaxVideoFrames;
- }
- // When the video frame backlog contains fewer frames than this value, the
- // video renderer will send an underflow notification to the video decoder.
- virtual int GetVideoUnderflowFrames() const {
- return GetMaxVideoPrerollFrames();
- }
- // Total number of video frames that populate the pipeline.
- // Increasing this increases memory usage but reduces jitter.
- virtual int GetMaxVideoFrames() const { return limits::kMaxVideoFrames; }
-
- // This function is called before the decoder buffer leaves the demuxer and
- // is sent to the media pipeline for decrypting and decoding. The default
- // implementation simply returns the buffer, indicating that no processing is
- // necessary.
- virtual scoped_refptr<DecoderBuffer> ProcessBeforeLeavingDemuxer(
- const scoped_refptr<DecoderBuffer>& buffer) {
- return buffer;
- }
-
- // Returns true if output is protected (i.e. HDCP is present).
- virtual bool IsOutputProtected() = 0;
-
- protected:
- static void SetInstance(ShellMediaPlatform* shell_media_platform);
-
- private:
- // Platform specific media Init and Tear down.
- virtual void InternalInitialize() {}
- virtual void InternalTerminate() {}
-
- DISALLOW_COPY_AND_ASSIGN(ShellMediaPlatform);
-};
-
-} // namespace media
-
-#define REGISTER_SHELL_MEDIA_PLATFORM(ClassName) \
- namespace media { \
- void ShellMediaPlatform::Initialize() { \
- DCHECK(!Instance()); \
- SetInstance(new ClassName); \
- ShellBufferFactory::Initialize(); \
- Instance()->InternalInitialize(); \
- } \
- void ShellMediaPlatform::Terminate() { \
- DCHECK(Instance()); \
- Instance()->InternalTerminate(); \
- ShellBufferFactory::Terminate(); \
- delete Instance(); \
- SetInstance(NULL); \
- } \
- } // namespace media
-
-#endif // MEDIA_BASE_SHELL_MEDIA_PLATFORM_H_
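A hedged sketch of how a port would hook into the macro above: implement the pure-virtual methods in a platform subclass and expand REGISTER_SHELL_MEDIA_PLATFORM once in a platform source file, which generates the Initialize()/Terminate() pair that installs the instance and the buffer factory. The class name, malloc-based pool, and memory limits are illustrative only:

    #include <cstdlib>

    #include "media/base/shell_media_platform.h"

    // Hypothetical platform implementation; only the pure-virtual methods are
    // filled in, everything else keeps the ShellMediaPlatform defaults.
    class ShellMediaPlatformExample : public media::ShellMediaPlatform {
     public:
      void* AllocateBuffer(size_t size) OVERRIDE { return std::malloc(size); }
      void FreeBuffer(void* ptr) OVERRIDE { std::free(ptr); }
      size_t GetSourceBufferStreamAudioMemoryLimit() const OVERRIDE {
        return 3 * 1024 * 1024;  // Illustrative limit.
      }
      size_t GetSourceBufferStreamVideoMemoryLimit() const OVERRIDE {
        return 30 * 1024 * 1024;  // Illustrative limit.
      }
      bool IsOutputProtected() OVERRIDE { return false; }
    };

    REGISTER_SHELL_MEDIA_PLATFORM(ShellMediaPlatformExample)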
diff --git a/src/media/base/shell_media_statistics.cc b/src/media/base/shell_media_statistics.cc
deleted file mode 100644
index 14f6b82..0000000
--- a/src/media/base/shell_media_statistics.cc
+++ /dev/null
@@ -1,126 +0,0 @@
-/*
- * Copyright 2012 Google Inc. All Rights Reserved.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "media/base/shell_media_statistics.h"
-
-#include <limits>
-
-#include "base/basictypes.h"
-
-namespace media {
-
-ShellMediaStatistics::ShellMediaStatistics() {
- Reset(true); // reset all stats, include global stats.
-}
-
-void ShellMediaStatistics::OnPlaybackBegin() {
- Reset(false); // reset non-global stats.
-}
-
-void ShellMediaStatistics::record(StatType type, int64 value) {
- if (type == STAT_TYPE_VIDEO_WIDTH)
- type = STAT_TYPE_VIDEO_WIDTH;
- ++stats_[type].times_;
- stats_[type].total_ += value;
- if (value > stats_[type].max_)
- stats_[type].max_ = value;
- if (value < stats_[type].min_)
- stats_[type].min_ = value;
- stats_[type].current_ = value;
-}
-
-void ShellMediaStatistics::record(StatType type,
- const base::TimeDelta& duration) {
- record(type, duration.ToInternalValue());
-}
-
-double ShellMediaStatistics::GetElapsedTime() const {
- return (base::Time::Now() - start_).InSecondsF();
-}
-
-int64 ShellMediaStatistics::GetTimes(StatType type) const {
- return stats_[type].times_;
-}
-
-int64 ShellMediaStatistics::GetTotal(StatType type) const {
- return stats_[type].total_;
-}
-
-int64 ShellMediaStatistics::GetCurrent(StatType type) const {
- return stats_[type].current_;
-}
-
-int64 ShellMediaStatistics::GetAverage(StatType type) const {
- if (stats_[type].times_ == 0)
- return 0;
- return stats_[type].total_ / stats_[type].times_;
-}
-
-int64 ShellMediaStatistics::GetMin(StatType type) const {
- return stats_[type].min_;
-}
-
-int64 ShellMediaStatistics::GetMax(StatType type) const {
- return stats_[type].max_;
-}
-
-double ShellMediaStatistics::GetTotalDuration(StatType type) const {
- return base::TimeDelta::FromInternalValue(GetTotal(type)).InSecondsF();
-}
-
-double ShellMediaStatistics::GetCurrentDuration(StatType type) const {
- return base::TimeDelta::FromInternalValue(GetCurrent(type)).InSecondsF();
-}
-
-double ShellMediaStatistics::GetAverageDuration(StatType type) const {
- return base::TimeDelta::FromInternalValue(GetAverage(type)).InSecondsF();
-}
-
-double ShellMediaStatistics::GetMinDuration(StatType type) const {
- return base::TimeDelta::FromInternalValue(GetMin(type)).InSecondsF();
-}
-
-double ShellMediaStatistics::GetMaxDuration(StatType type) const {
- return base::TimeDelta::FromInternalValue(GetMax(type)).InSecondsF();
-}
-
-// static
-ShellMediaStatistics& ShellMediaStatistics::Instance() {
- static ShellMediaStatistics media_statistics;
- return media_statistics;
-}
-
-void ShellMediaStatistics::Reset(bool include_global_stats) {
- start_ = base::Time::Now();
- int items_to_reset =
- include_global_stats ? arraysize(stats_) : STAT_TYPE_START_OF_GLOBAL_STAT;
- for (int i = 0; i < items_to_reset; ++i) {
- // We deliberately do not reset current_ so its value is kept after a reset.
- stats_[i].times_ = 0;
- stats_[i].total_ = 0;
- stats_[i].min_ = std::numeric_limits<int64>::max();
- stats_[i].max_ = std::numeric_limits<int64>::min();
- }
-}
-
-ShellScopedMediaStat::ShellScopedMediaStat(ShellMediaStatistics::StatType type)
- : type_(type), start_(base::Time::Now()) {}
-
-ShellScopedMediaStat::~ShellScopedMediaStat() {
- ShellMediaStatistics::Instance().record(type_, base::Time::Now() - start_);
-}
-
-} // namespace media
diff --git a/src/media/base/shell_media_statistics.h b/src/media/base/shell_media_statistics.h
deleted file mode 100644
index e44a794..0000000
--- a/src/media/base/shell_media_statistics.h
+++ /dev/null
@@ -1,140 +0,0 @@
-/*
- * Copyright 2012 Google Inc. All Rights Reserved.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef MEDIA_BASE_SHELL_MEDIA_STATISTICS_H_
-#define MEDIA_BASE_SHELL_MEDIA_STATISTICS_H_
-
-#include "base/synchronization/lock.h"
-#include "base/time.h"
-
-namespace media {
-
-// This class collects events and their durations in the media stack.
-// Note that it is not thread safe but as its purpose is just to collect
-// performance data it is ok to call it from different threads.
-class ShellMediaStatistics {
- public:
- enum StatType {
- STAT_TYPE_AUDIO_CODEC,
- STAT_TYPE_AUDIO_CHANNELS,
- STAT_TYPE_AUDIO_SAMPLE_PER_SECOND,
- STAT_TYPE_AUDIO_UNDERFLOW,
- STAT_TYPE_VIDEO_CODEC,
- STAT_TYPE_VIDEO_WIDTH,
- STAT_TYPE_VIDEO_HEIGHT,
- // Decode a video frame
- STAT_TYPE_VIDEO_FRAME_DECODE,
- // Drop a video frame that we don't have enough time to render
- STAT_TYPE_VIDEO_FRAME_DROP,
- // The frame arrives too late at the renderer, which usually indicates that
- // it took too much time in the decoder.
- STAT_TYPE_VIDEO_FRAME_LATE,
- // How many frames are cached in the video renderer. If this value drops,
- // jitter becomes more likely.
- STAT_TYPE_VIDEO_RENDERER_BACKLOG,
- // Time spent decrypting a buffer
- STAT_TYPE_DECRYPT,
- // The stat types after the following are global stats, i.e. their values
- // are preserved across playback of different videos.
- STAT_TYPE_START_OF_GLOBAL_STAT,
- // The size of the shell buffer block just allocated.
- STAT_TYPE_ALLOCATED_SHELL_BUFFER_SIZE,
- STAT_TYPE_MAX
- };
-
- // The structure is used to track all statistics. int64 is used here so we
- // can track a TimeDelta. Note that some of the fields may not be meaningful
- // to all stat types. For example, `total` is not meaningful for video
- // resolution as we don't care about the sum of video resolution over time.
- // But for simplicity we keep all the information, and it is the
- // responsibility of the user of this class to use the individual statistics
- // correctly.
- struct Stat {
- int64 times_; // How many times the stat has been updated.
- int64 current_;
- int64 total_;
- int64 min_;
- int64 max_;
- };
-
- ShellMediaStatistics();
-
- void OnPlaybackBegin();
- void record(StatType type, int64 value);
- void record(StatType type, const base::TimeDelta& duration);
-
- // Returns the time elapsed since last reset in the unit of second.
- double GetElapsedTime() const;
- int64 GetTimes(StatType type) const;
-
- int64 GetTotal(StatType type) const;
- int64 GetCurrent(StatType type) const;
- int64 GetAverage(StatType type) const;
- int64 GetMin(StatType type) const;
- int64 GetMax(StatType type) const;
-
- // The following access functions are provided for ease of use. They are not
- // applicable to all stats; it is the responsibility of the user of these
- // functions to ensure that the call is valid.
- // The unit of time is seconds.
- double GetTotalDuration(StatType type) const;
- double GetCurrentDuration(StatType type) const;
- double GetAverageDuration(StatType type) const;
- double GetMinDuration(StatType type) const;
- double GetMaxDuration(StatType type) const;
-
- static ShellMediaStatistics& Instance();
-
- private:
- void Reset(bool include_global_stats);
-
- base::Time start_;
- Stat stats_[STAT_TYPE_MAX];
-};
-
-class ShellScopedMediaStat {
- public:
- ShellScopedMediaStat(ShellMediaStatistics::StatType type);
- ~ShellScopedMediaStat();
-
- private:
- ShellMediaStatistics::StatType type_;
- base::Time start_;
-};
-
-} // namespace media
-
-#if defined(__LB_SHELL__FOR_RELEASE__)
-#define UPDATE_MEDIA_STATISTICS(type, value) \
- do { \
- } while (false)
-
-// This macro reports a media stat with its duration
-#define SCOPED_MEDIA_STATISTICS(type) \
- do { \
- } while (false)
-#else // defined(__LB_SHELL__FOR_RELEASE__)
-// This macro reports a media stat with its new value
-#define UPDATE_MEDIA_STATISTICS(type, value) \
- media::ShellMediaStatistics::Instance().record( \
- media::ShellMediaStatistics::type, value)
-
-// This macro reports a media stat with its duration
-#define SCOPED_MEDIA_STATISTICS(type) \
- media::ShellScopedMediaStat statistics_event( \
- media::ShellMediaStatistics::type)
-#endif // defined(__LB_SHELL__FOR_RELEASE__)
-
-#endif // MEDIA_BASE_SHELL_MEDIA_STATISTICS_H_
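A hedged usage sketch for the two macros above: UPDATE_MEDIA_STATISTICS records one value, SCOPED_MEDIA_STATISTICS times the enclosing scope, and both compile to no-ops in __LB_SHELL__FOR_RELEASE__ builds. The decoder hook below is hypothetical:

    #include "media/base/shell_media_statistics.h"

    namespace media {

    // Hypothetical decoder hook showing both macros in context.
    void DecodeOneVideoFrame(int width, int height) {
      // Times everything up to the end of this function as
      // STAT_TYPE_VIDEO_FRAME_DECODE.
      SCOPED_MEDIA_STATISTICS(STAT_TYPE_VIDEO_FRAME_DECODE);

      // Records plain values for the current resolution.
      UPDATE_MEDIA_STATISTICS(STAT_TYPE_VIDEO_WIDTH, width);
      UPDATE_MEDIA_STATISTICS(STAT_TYPE_VIDEO_HEIGHT, height);

      // ... the actual decode work would go here ...
    }

    }  // namespace media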
diff --git a/src/media/base/shell_video_data_allocator.cc b/src/media/base/shell_video_data_allocator.cc
deleted file mode 100644
index e47e518..0000000
--- a/src/media/base/shell_video_data_allocator.cc
+++ /dev/null
@@ -1,67 +0,0 @@
-/*
- * Copyright 2015 Google Inc. All Rights Reserved.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "media/base/shell_video_data_allocator.h"
-
-#include "base/logging.h"
-
-namespace media {
-
-ShellVideoDataAllocator::YV12Param::YV12Param(int decoded_width,
- int decoded_height,
- const gfx::Rect& visible_rect,
- uint8* data)
- : decoded_width_(decoded_width),
- decoded_height_(decoded_height),
- visible_rect_(visible_rect),
- y_pitch_(decoded_width),
- uv_pitch_(decoded_width / 2),
- y_data_(data),
- u_data_(y_data_ + y_pitch_ * decoded_height_),
- v_data_(u_data_ + uv_pitch_ * decoded_height_ / 2) {}
-
-ShellVideoDataAllocator::YV12Param::YV12Param(int width,
- int height,
- int y_pitch,
- int uv_pitch,
- uint8* y_data,
- uint8* u_data,
- uint8* v_data)
- : decoded_width_(width),
- decoded_height_(height),
- visible_rect_(0, 0, width, height),
- y_pitch_(y_pitch),
- uv_pitch_(uv_pitch),
- y_data_(y_data),
- u_data_(u_data),
- v_data_(v_data) {
- DCHECK_NE(y_pitch_, 0);
- DCHECK_NE(uv_pitch_, 0);
- DCHECK(y_data_);
- DCHECK(u_data);
- DCHECK(v_data);
-}
-
-ShellVideoDataAllocator::NV12Param::NV12Param(int width,
- int height,
- int y_pitch,
- const gfx::Rect& visible_rect)
- : decoded_width_(width),
- decoded_height_(height),
- y_pitch_(y_pitch),
- visible_rect_(visible_rect) {}
-
-} // namespace media
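The single-pointer YV12Param constructor above implies one contiguous layout: a full-resolution Y plane followed by half-resolution U and V planes, with pitches equal to the decoded width and half of it. A small sketch of the size and offset arithmetic that constructor assumes (hypothetical helper, even width and height assumed):

    #include <stddef.h>

    // Offsets implied by YV12Param(decoded_width, decoded_height, visible_rect,
    // data): Y is w x h, U and V are each (w/2) x (h/2), stored back to back.
    struct YV12Layout {
      size_t y_offset;
      size_t u_offset;
      size_t v_offset;
      size_t total_size;
    };

    inline YV12Layout ComputeYV12Layout(int decoded_width, int decoded_height) {
      const size_t y_size =
          static_cast<size_t>(decoded_width) * decoded_height;
      const size_t uv_size = y_size / 4;  // Each chroma plane is w/2 x h/2.
      YV12Layout layout;
      layout.y_offset = 0;
      layout.u_offset = y_size;            // u_data = data + y_pitch * height
      layout.v_offset = y_size + uv_size;  // v_data = u_data + uv_pitch * h/2
      layout.total_size = y_size + 2 * uv_size;
      return layout;
    }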
diff --git a/src/media/base/shell_video_data_allocator.h b/src/media/base/shell_video_data_allocator.h
deleted file mode 100644
index ca164a2..0000000
--- a/src/media/base/shell_video_data_allocator.h
+++ /dev/null
@@ -1,167 +0,0 @@
-/*
- * Copyright 2015 Google Inc. All Rights Reserved.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef MEDIA_BASE_SHELL_VIDEO_DATA_ALLOCATOR_H_
-#define MEDIA_BASE_SHELL_VIDEO_DATA_ALLOCATOR_H_
-
-#include "base/compiler_specific.h"
-#include "base/logging.h"
-#include "base/memory/ref_counted.h"
-#include "media/base/video_frame.h"
-
-namespace media {
-
-class ShellRawVideoDecoder;
-
-// This class is introduced to remove low-level video decoders' hidden
-// dependency on platform-dependent graphics code. It is possible to achieve
-// this via interfaces created on each platform; however, abstracting them
-// into a common interface is more explicit.
-class MEDIA_EXPORT ShellVideoDataAllocator {
- public:
- class FrameBuffer : public base::RefCountedThreadSafe<FrameBuffer> {
- public:
- FrameBuffer() {}
- virtual ~FrameBuffer() {}
- virtual uint8* data() const = 0;
- virtual size_t size() const = 0;
-
- private:
- DISALLOW_COPY_AND_ASSIGN(FrameBuffer);
- };
-
- class YV12Param {
- public:
- YV12Param(int decoded_width,
- int decoded_height,
- const gfx::Rect& visible_rect,
- uint8* data);
-
- // Create with data pointers to individual planes. All pointers should be in
- // the same memory block controlled by the accompanying FrameBuffer passed to
- // CreateYV12Frame. The decoded size and visible rect are assumed to be the
- // same. It is only used for decoding VP9 frames.
- YV12Param(int width,
- int height,
- int y_pitch,
- int uv_pitch,
- uint8* y_data,
- uint8* u_data,
- uint8* v_data);
- int y_pitch() const {
- DCHECK_NE(y_pitch_, 0);
- return y_pitch_;
- }
- int uv_pitch() const {
- DCHECK_NE(uv_pitch_, 0);
- return uv_pitch_;
- }
- uint8* y_data() const {
- DCHECK(y_data_);
- return y_data_;
- }
- uint8* u_data() const {
- DCHECK(u_data_);
- return u_data_;
- }
- uint8* v_data() const {
- DCHECK(v_data_);
- return v_data_;
- }
-
- int decoded_width() const { return decoded_width_; }
- int decoded_height() const { return decoded_height_; }
- const gfx::Rect& visible_rect() const { return visible_rect_; }
-
- private:
- int decoded_width_;
- int decoded_height_;
-
- gfx::Rect visible_rect_;
-
- int y_pitch_;
- int uv_pitch_;
- uint8* y_data_;
- uint8* u_data_;
- uint8* v_data_;
- };
-
-  // Only used for some platforms' hardware AVC decoders that only support
-  // NV12 output.
- class NV12Param {
- public:
- NV12Param(int decoded_width,
- int decoded_height,
- int y_pitch,
- const gfx::Rect& visible_rect);
-
- int decoded_width() const { return decoded_width_; }
- int decoded_height() const { return decoded_height_; }
- int y_pitch() const { return y_pitch_; }
- const gfx::Rect& visible_rect() const { return visible_rect_; }
-
- private:
- int decoded_width_;
- int decoded_height_;
- int y_pitch_;
- gfx::Rect visible_rect_;
- };
-
- ShellVideoDataAllocator() {}
- virtual ~ShellVideoDataAllocator() {}
-
- // Allocate a buffer to store the video frame to be decoded.
- virtual scoped_refptr<FrameBuffer> AllocateFrameBuffer(size_t size,
- size_t alignment) = 0;
- virtual scoped_refptr<VideoFrame> CreateYV12Frame(
- const scoped_refptr<FrameBuffer>& frame_buffer,
- const YV12Param& param,
- const base::TimeDelta& timestamp) = 0;
-
- // Some hardware AVC decoders only support NV12 output. They are perfectly
-  // aligned for rendering as a texture, though.
- virtual scoped_refptr<VideoFrame> CreateNV12Frame(
- const scoped_refptr<FrameBuffer>& frame_buffer,
- const NV12Param& param,
- const base::TimeDelta& timestamp) {
- UNREFERENCED_PARAMETER(frame_buffer);
- UNREFERENCED_PARAMETER(param);
- UNREFERENCED_PARAMETER(timestamp);
- NOTREACHED();
- return NULL;
- }
-
- // Return a video frame filled with RGBA zero on platforms that require punch
- // out. Return NULL otherwise.
- virtual scoped_refptr<VideoFrame> GetPunchOutFrame() { return NULL; }
-
- // Most platforms limit the number of active raw video decoders to one. The
- // following functions enable the implementations on these platforms to check
- // if there is more than one video decoder active.
- virtual void Acquire(ShellRawVideoDecoder* owner) {
- UNREFERENCED_PARAMETER(owner);
- }
- virtual void Release(ShellRawVideoDecoder* owner) {
- UNREFERENCED_PARAMETER(owner);
- }
-
- private:
- DISALLOW_COPY_AND_ASSIGN(ShellVideoDataAllocator);
-};
-
-} // namespace media
-
-#endif // MEDIA_BASE_SHELL_VIDEO_DATA_ALLOCATOR_H_
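Note: the interface deleted above is easier to read next to a concrete implementation, so here is a minimal sketch under stated assumptions. HeapFrameBuffer and HeapVideoDataAllocator are invented names and not part of this change; only the ShellVideoDataAllocator, FrameBuffer, YV12Param, and scoped_array types come from the deleted sources.

    // Minimal sketch, assuming the deleted ShellVideoDataAllocator interface.
    // HeapFrameBuffer and HeapVideoDataAllocator are hypothetical names.
    #include "base/memory/scoped_ptr.h"
    #include "media/base/shell_video_data_allocator.h"

    namespace media {

    class HeapFrameBuffer : public ShellVideoDataAllocator::FrameBuffer {
     public:
      explicit HeapFrameBuffer(size_t size)
          : data_(new uint8[size]), size_(size) {}
      virtual uint8* data() const { return data_.get(); }
      virtual size_t size() const { return size_; }

     private:
      scoped_array<uint8> data_;
      size_t size_;
    };

    class HeapVideoDataAllocator : public ShellVideoDataAllocator {
     public:
      virtual scoped_refptr<FrameBuffer> AllocateFrameBuffer(size_t size,
                                                             size_t alignment) {
        // |alignment| is ignored in this sketch; a real implementation must
        // honor it when allocating the buffer.
        return new HeapFrameBuffer(size);
      }
      virtual scoped_refptr<VideoFrame> CreateYV12Frame(
          const scoped_refptr<FrameBuffer>& frame_buffer,
          const YV12Param& param,
          const base::TimeDelta& timestamp) {
        // A real implementation would wrap the planes described by |param|
        // into a platform VideoFrame; omitted here.
        return NULL;
      }
    };

    }  // namespace media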
diff --git a/src/media/base/shell_video_frame_provider.cc b/src/media/base/shell_video_frame_provider.cc
deleted file mode 100644
index 0cf2f98..0000000
--- a/src/media/base/shell_video_frame_provider.cc
+++ /dev/null
@@ -1,190 +0,0 @@
-/*
- * Copyright 2015 Google Inc. All Rights Reserved.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "media/base/shell_video_frame_provider.h"
-
-#include "base/logging.h"
-#if SB_API_VERSION >= 4
-#include "starboard/decode_target.h"
-#endif // #if SB_API_VERSION >= 4
-
-namespace media {
-
-ShellVideoFrameProvider::ShellVideoFrameProvider(
- scoped_refptr<VideoFrame> punch_out)
- : punch_out_(punch_out), has_consumed_frames_(false), dropped_frames_(0),
- output_mode_(kOutputModeInvalid) {
-#if !defined(__LB_SHELL__FOR_RELEASE__)
- max_delay_in_microseconds_ = 0;
-#endif // !defined(__LB_SHELL__FOR_RELEASE__)
-}
-
-void ShellVideoFrameProvider::RegisterMediaTimeAndSeekingStateCB(
- const MediaTimeAndSeekingStateCB& media_time_and_seeking_state_cb) {
- DCHECK(!media_time_and_seeking_state_cb.is_null());
- base::AutoLock auto_lock(frames_lock_);
- media_time_and_seeking_state_cb_ = media_time_and_seeking_state_cb;
-}
-
-void ShellVideoFrameProvider::UnregisterMediaTimeAndSeekingStateCB(
- const MediaTimeAndSeekingStateCB& media_time_and_seeking_state_cb) {
- base::AutoLock auto_lock(frames_lock_);
-  // It is possible that a new callback is registered before the previous
-  // callback is unregistered. Always ensure that the callback passed in is
-  // the current one before resetting.
- if (media_time_and_seeking_state_cb_.Equals(
- media_time_and_seeking_state_cb)) {
- media_time_and_seeking_state_cb_.Reset();
- }
-}
-
-const scoped_refptr<VideoFrame>& ShellVideoFrameProvider::GetCurrentFrame() {
- if (punch_out_) {
- current_frame_ = punch_out_;
- return current_frame_;
- }
-
- const int kEpsilonInMicroseconds =
- base::Time::kMicrosecondsPerSecond / 60 / 2;
-
- base::AutoLock auto_lock(frames_lock_);
-
- base::TimeDelta media_time;
- bool is_seeking;
- GetMediaTimeAndSeekingState_Locked(&media_time, &is_seeking);
- while (!frames_.empty()) {
- int64_t frame_time = frames_[0]->GetTimestamp().InMicroseconds();
- if (frame_time >= media_time.InMicroseconds())
- break;
- if (current_frame_ != frames_[0] &&
- frame_time + kEpsilonInMicroseconds >= media_time.InMicroseconds())
- break;
-
- if (current_frame_ != frames_[0] && !is_seeking) {
- ++dropped_frames_;
-
-#if !defined(__LB_SHELL__FOR_RELEASE__)
- if (media_time.InMicroseconds() - frame_time > max_delay_in_microseconds_)
- max_delay_in_microseconds_ = media_time.InMicroseconds() - frame_time;
- const bool kLogFrameDrops ALLOW_UNUSED = false;
- LOG_IF(WARNING, kLogFrameDrops)
- << "dropped one frame with timestamp "
- << frames_[0]->GetTimestamp().InMicroseconds() << " at media time "
- << media_time.InMicroseconds() << " total dropped " << dropped_frames_
- << " frames with a max delay of " << max_delay_in_microseconds_
-          << " us";
-#endif // !defined(__LB_SHELL__FOR_RELEASE__)
- }
-
- if (frames_.size() == 1) {
- current_frame_ = frames_[0];
- }
-
- frames_.erase(frames_.begin());
- has_consumed_frames_ = true;
- }
- if (!frames_.empty()) {
- current_frame_ = frames_[0];
- }
- return current_frame_;
-}
-
-void ShellVideoFrameProvider::SetOutputMode(OutputMode output_mode) {
- base::AutoLock auto_lock(frames_lock_);
- output_mode_ = output_mode;
-}
-
-ShellVideoFrameProvider::OutputMode ShellVideoFrameProvider::GetOutputMode()
- const {
- base::AutoLock auto_lock(frames_lock_);
- return output_mode_;
-}
-
-#if SB_API_VERSION >= 4
-
-void ShellVideoFrameProvider::SetGetCurrentSbDecodeTargetFunction(
- GetCurrentSbDecodeTargetFunction function) {
- base::AutoLock auto_lock(frames_lock_);
- get_current_sb_decode_target_function_ = function;
-}
-
-void ShellVideoFrameProvider::ResetGetCurrentSbDecodeTargetFunction() {
- base::AutoLock auto_lock(frames_lock_);
- get_current_sb_decode_target_function_.Reset();
-}
-
-SbDecodeTarget ShellVideoFrameProvider::GetCurrentSbDecodeTarget() const {
- base::AutoLock auto_lock(frames_lock_);
- if (get_current_sb_decode_target_function_.is_null()) {
- return kSbDecodeTargetInvalid;
- } else {
- return get_current_sb_decode_target_function_.Run();
- }
-}
-
-#endif // #if SB_API_VERSION >= 4
-
-void ShellVideoFrameProvider::AddFrame(const scoped_refptr<VideoFrame>& frame) {
- base::AutoLock auto_lock(frames_lock_);
- frames_.push_back(frame);
-}
-
-void ShellVideoFrameProvider::Flush() {
- base::AutoLock auto_lock(frames_lock_);
- frames_.clear();
-}
-
-void ShellVideoFrameProvider::Stop() {
- base::AutoLock auto_lock(frames_lock_);
- frames_.clear();
- current_frame_ = NULL;
- dropped_frames_ = 0;
-}
-
-size_t ShellVideoFrameProvider::GetNumOfFramesCached() const {
- base::AutoLock auto_lock(frames_lock_);
- return frames_.size();
-}
-
-void ShellVideoFrameProvider::GetMediaTimeAndSeekingState_Locked(
- base::TimeDelta* media_time,
- bool* is_seeking) const {
- DCHECK(media_time);
- DCHECK(is_seeking);
- frames_lock_.AssertAcquired();
- if (media_time_and_seeking_state_cb_.is_null()) {
- *media_time = base::TimeDelta();
- *is_seeking = false;
- return;
- }
- media_time_and_seeking_state_cb_.Run(media_time, is_seeking);
-}
-
-bool ShellVideoFrameProvider::QueryAndResetHasConsumedFrames() {
- base::AutoLock auto_lock(frames_lock_);
- bool previous_value = has_consumed_frames_;
- has_consumed_frames_ = false;
- return previous_value;
-}
-
-int ShellVideoFrameProvider::ResetAndReturnDroppedFrames() {
- base::AutoLock auto_lock(frames_lock_);
- int dropped_frames = dropped_frames_;
- dropped_frames_ = 0;
- return dropped_frames;
-}
-
-} // namespace media
diff --git a/src/media/base/shell_video_frame_provider.h b/src/media/base/shell_video_frame_provider.h
deleted file mode 100644
index ae1e2d0..0000000
--- a/src/media/base/shell_video_frame_provider.h
+++ /dev/null
@@ -1,132 +0,0 @@
-/*
- * Copyright 2015 Google Inc. All Rights Reserved.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef MEDIA_BASE_SHELL_VIDEO_FRAME_PROVIDER_H_
-#define MEDIA_BASE_SHELL_VIDEO_FRAME_PROVIDER_H_
-
-#include <vector>
-
-#include "base/callback.h"
-#include "base/memory/ref_counted.h"
-#include "base/synchronization/lock.h"
-#include "base/time.h"
-#include "media/base/video_frame.h"
-#include "starboard/decode_target.h"
-
-namespace media {
-
-// TODO: Remove Shell prefix.
-// The ShellVideoFrameProvider manages the backlog for video frames. It has the
-// following functionalities:
-// 1. It caches the video frames ready to be displayed.
-// 2. It decides which frame to be displayed at the current time.
-// 3. It removes frames that will no longer be displayed.
-class ShellVideoFrameProvider
- : public base::RefCountedThreadSafe<ShellVideoFrameProvider> {
- public:
-#if SB_API_VERSION >= 4
- typedef base::Callback<SbDecodeTarget()> GetCurrentSbDecodeTargetFunction;
-#endif // SB_API_VERSION >= 4
-
- enum OutputMode {
- kOutputModePunchOut,
- kOutputModeDecodeToTexture,
- kOutputModeInvalid,
- };
-
- explicit ShellVideoFrameProvider(scoped_refptr<VideoFrame> punch_out = NULL);
-
-  // The callback returns the current media time and a bool which is set to
- // true when a seek is in progress.
- typedef base::Callback<void(base::TimeDelta*, bool*)>
- MediaTimeAndSeekingStateCB;
- // This class uses the media time to decide which frame is current. It
- // retrieves the media time from the registered media_time_cb. There can only
-  // be one registered media_time_cb at a certain time; a call to
- // RegisterMediaTimeAndSeekingStateCB() will overwrite the previously
- // registered callback.
- void RegisterMediaTimeAndSeekingStateCB(
- const MediaTimeAndSeekingStateCB& media_time_and_seeking_state_cb);
- // This function unregisters the media time callback if it hasn't been
- // overwritten by another callback.
- void UnregisterMediaTimeAndSeekingStateCB(
- const MediaTimeAndSeekingStateCB& media_time_and_seeking_state_cb);
-
- // Returns the current frame to be displayed if there is one. Otherwise it
- // returns NULL.
- const scoped_refptr<VideoFrame>& GetCurrentFrame();
-
- void SetOutputMode(OutputMode output_mode);
- OutputMode GetOutputMode() const;
-
-#if SB_API_VERSION >= 4
- // For Starboard platforms that have a decode-to-texture player, we enable
- // this ShellVideoFrameProvider to act as a bridge for Cobalt code to query
- // for the current SbDecodeTarget. In effect, we bypass all of
- // ShellVideoFrameProvider's logic in this case, instead relying on the
- // Starboard implementation to provide us with the current video frame when
- // needed.
- void SetGetCurrentSbDecodeTargetFunction(
- GetCurrentSbDecodeTargetFunction function);
-
- void ResetGetCurrentSbDecodeTargetFunction();
-
- SbDecodeTarget GetCurrentSbDecodeTarget() const;
-#endif // SB_API_VERSION >= 4
-
- void AddFrame(const scoped_refptr<VideoFrame>& frame);
-  // Flush will clear all cached frames except the current frame, so the
-  // current frame can still be displayed during a seek.
- void Flush();
- // Stop will clear all cached frames including the current frame.
- void Stop();
- size_t GetNumOfFramesCached() const;
-
- // Return true if VideoFrames have been released from the internal frames_
- // queue since the last time this was called.
- bool QueryAndResetHasConsumedFrames();
-
- // Return the value of |dropped_frames_| and reset it to 0.
- int ResetAndReturnDroppedFrames();
-
- private:
- void GetMediaTimeAndSeekingState_Locked(base::TimeDelta* media_time,
- bool* is_seeking) const;
-
- scoped_refptr<VideoFrame> punch_out_;
-
- mutable base::Lock frames_lock_;
- MediaTimeAndSeekingStateCB media_time_and_seeking_state_cb_;
- std::vector<scoped_refptr<VideoFrame> > frames_;
- scoped_refptr<VideoFrame> current_frame_;
- bool has_consumed_frames_;
- int dropped_frames_;
-
-#if !defined(__LB_SHELL__FOR_RELEASE__)
- int max_delay_in_microseconds_;
-#endif // !defined(__LB_SHELL__FOR_RELEASE__)
-
- OutputMode output_mode_;
-#if SB_API_VERSION >= 4
- GetCurrentSbDecodeTargetFunction get_current_sb_decode_target_function_;
-#endif // SB_API_VERSION >= 4
-
- DISALLOW_COPY_AND_ASSIGN(ShellVideoFrameProvider);
-};
-
-} // namespace media
-
-#endif // MEDIA_BASE_SHELL_VIDEO_FRAME_PROVIDER_H_
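Note: the frame-selection policy implemented by the two deleted ShellVideoFrameProvider files is worth restating: a queued frame remains displayable while it is no more than half a 60 Hz frame period behind the current media time; anything older is discarded and, outside of a seek, counted as dropped. A standalone sketch of just that timing check follows; the helper name is invented, the constant matches the deleted .cc.

    #include <stdint.h>

    // Half of one frame period at 60 fps, in microseconds, as in the deleted
    // ShellVideoFrameProvider::GetCurrentFrame().
    const int64_t kEpsilonInMicroseconds = 1000000 / 60 / 2;  // 8333

    // Returns true if a frame stamped |frame_time_us| should still be shown at
    // |media_time_us|: it is in the future, or less than ~8.3 ms late.
    bool IsFrameStillDisplayable(int64_t frame_time_us, int64_t media_time_us) {
      return frame_time_us + kEpsilonInMicroseconds >= media_time_us;
    }
    // Example: at media time 100000 us a frame stamped 92000 us is kept
    // (8000 < 8333), while one stamped 90000 us is dropped.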
diff --git a/src/media/base/simd/convert_rgb_to_yuv.h b/src/media/base/simd/convert_rgb_to_yuv.h
deleted file mode 100644
index 03fe114..0000000
--- a/src/media/base/simd/convert_rgb_to_yuv.h
+++ /dev/null
@@ -1,86 +0,0 @@
-// Copyright (c) 2011 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef MEDIA_BASE_SIMD_CONVERT_RGB_TO_YUV_H_
-#define MEDIA_BASE_SIMD_CONVERT_RGB_TO_YUV_H_
-
-#include "base/basictypes.h"
-#include "media/base/yuv_convert.h"
-
-namespace media {
-
-// Converts an ARGB image to a YV12 image. This function calls ASM functions
-// implemented in "convert_rgb_to_yuv_ssse3.asm" to convert the specified ARGB
-// image to a YV12 image.
-void ConvertRGB32ToYUV_SSSE3(const uint8* rgbframe,
- uint8* yplane,
- uint8* uplane,
- uint8* vplane,
- int width,
- int height,
- int rgbstride,
- int ystride,
- int uvstride);
-
-// Converts an RGB image to a YV12 image. This function is almost the same as
-// ConvertRGB32ToYUV_SSSE3 except its first argument is a pointer to RGB pixels.
-void ConvertRGB24ToYUV_SSSE3(const uint8* rgbframe,
- uint8* yplane,
- uint8* uplane,
- uint8* vplane,
- int width,
- int height,
- int rgbstride,
- int ystride,
- int uvstride);
-
-// SSE2 version of converting RGBA to YV12.
-void ConvertRGB32ToYUV_SSE2(const uint8* rgbframe,
- uint8* yplane,
- uint8* uplane,
- uint8* vplane,
- int width,
- int height,
- int rgbstride,
- int ystride,
- int uvstride);
-
-// This is a C reference implementation of the above routine.
-// This method should only be used in unit tests.
-// TODO(hclam): Should use this as the C version of RGB to YUV.
-void ConvertRGB32ToYUV_SSE2_Reference(const uint8* rgbframe,
- uint8* yplane,
- uint8* uplane,
- uint8* vplane,
- int width,
- int height,
- int rgbstride,
- int ystride,
- int uvstride);
-
-// C version of converting RGBA to YV12.
-void ConvertRGB32ToYUV_C(const uint8* rgbframe,
- uint8* yplane,
- uint8* uplane,
- uint8* vplane,
- int width,
- int height,
- int rgbstride,
- int ystride,
- int uvstride);
-
-// C version of converting RGB24 to YV12.
-void ConvertRGB24ToYUV_C(const uint8* rgbframe,
- uint8* yplane,
- uint8* uplane,
- uint8* vplane,
- int width,
- int height,
- int rgbstride,
- int ystride,
- int uvstride);
-
-} // namespace media
-
-#endif // MEDIA_BASE_SIMD_CONVERT_RGB_TO_YUV_H_
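Note: all of the converters declared in the header deleted above share the same plane and stride convention (full-resolution Y plane, half-resolution U and V planes). A hedged usage sketch of the C fallback, assuming an even-sized frame and the BGRA byte order the implementations expect; ConvertOneFrame is an invented helper name.

    #include "base/memory/scoped_ptr.h"
    #include "media/base/simd/convert_rgb_to_yuv.h"

    // Converts one BGRA frame to YV12 using the deleted C fallback.
    void ConvertOneFrame(const uint8* bgra, int width, int height) {
      // YV12: full-resolution Y plane plus quarter-resolution U and V planes.
      scoped_array<uint8> y(new uint8[width * height]);
      scoped_array<uint8> u(new uint8[(width / 2) * (height / 2)]);
      scoped_array<uint8> v(new uint8[(width / 2) * (height / 2)]);
      media::ConvertRGB32ToYUV_C(bgra, y.get(), u.get(), v.get(),
                                 width, height,
                                 width * 4,   // rgbstride: 4 bytes per pixel
                                 width,       // ystride
                                 width / 2);  // uvstride
    }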
diff --git a/src/media/base/simd/convert_rgb_to_yuv_c.cc b/src/media/base/simd/convert_rgb_to_yuv_c.cc
deleted file mode 100644
index ae4c731..0000000
--- a/src/media/base/simd/convert_rgb_to_yuv_c.cc
+++ /dev/null
@@ -1,82 +0,0 @@
-// Copyright (c) 2011 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "media/base/simd/convert_rgb_to_yuv.h"
-
-namespace media {
-
-static int clip_byte(int x) {
- if (x > 255)
- return 255;
- else if (x < 0)
- return 0;
- else
- return x;
-}
-
-void ConvertRGB32ToYUV_C(const uint8* rgbframe,
- uint8* yplane,
- uint8* uplane,
- uint8* vplane,
- int width,
- int height,
- int rgbstride,
- int ystride,
- int uvstride) {
- for (int i = 0; i < height; ++i) {
- for (int j = 0; j < width; ++j) {
- // Since the input pixel format is RGB32, there are 4 bytes per pixel.
- const uint8* pixel = rgbframe + 4 * j;
- yplane[j] = clip_byte(((pixel[2] * 66 + pixel[1] * 129 +
- pixel[0] * 25 + 128) >> 8) + 16);
- if (i % 2 == 0 && j % 2 == 0) {
- uplane[j / 2] = clip_byte(((pixel[2] * -38 + pixel[1] * -74 +
- pixel[0] * 112 + 128) >> 8) + 128);
- vplane[j / 2] = clip_byte(((pixel[2] * 112 + pixel[1] * -94 +
- pixel[0] * -18 + 128) >> 8) + 128);
- }
- }
-
- rgbframe += rgbstride;
- yplane += ystride;
- if (i % 2 == 0) {
- uplane += uvstride;
- vplane += uvstride;
- }
- }
-}
-
-void ConvertRGB24ToYUV_C(const uint8* rgbframe,
- uint8* yplane,
- uint8* uplane,
- uint8* vplane,
- int width,
- int height,
- int rgbstride,
- int ystride,
- int uvstride) {
- for (int i = 0; i < height; ++i) {
- for (int j = 0; j < width; ++j) {
- // Since the input pixel format is RGB24, there are 3 bytes per pixel.
- const uint8* pixel = rgbframe + 3 * j;
- yplane[j] = clip_byte(((pixel[2] * 66 + pixel[1] * 129 +
- pixel[0] * 25 + 128) >> 8) + 16);
- if (i % 2 == 0 && j % 2 == 0) {
- uplane[j / 2] = clip_byte(((pixel[2] * -38 + pixel[1] * -74 +
- pixel[0] * 112 + 128) >> 8) + 128);
- vplane[j / 2] = clip_byte(((pixel[2] * 112 + pixel[1] * -94 +
- pixel[0] * -18 + 128) >> 8) + 128);
- }
- }
-
- rgbframe += rgbstride;
- yplane += ystride;
- if (i % 2 == 0) {
- uplane += uvstride;
- vplane += uvstride;
- }
- }
-}
-
-} // namespace media
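Note: a quick sanity check on the fixed-point coefficients in the file deleted above (its pixel bytes are ordered B, G, R in memory). For pure white and pure black the formula lands exactly on the BT.601 limited-range values; the helper below is illustrative only.

    #include <assert.h>

    // Verifies the deleted ConvertRGB32ToYUV_C math for a white BGRA pixel.
    void CheckWhitePixel() {
      const int b = 255, g = 255, r = 255;
      const int y = ((r * 66 + g * 129 + b * 25 + 128) >> 8) + 16;     // 235
      const int u = ((r * -38 + g * -74 + b * 112 + 128) >> 8) + 128;  // 128
      const int v = ((r * 112 + g * -94 + b * -18 + 128) >> 8) + 128;  // 128
      assert(y == 235 && u == 128 && v == 128);  // BT.601 limited-range white.
      // A black pixel (0, 0, 0) gives y == 16, u == 128, v == 128.
    }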
diff --git a/src/media/base/simd/convert_rgb_to_yuv_sse2.cc b/src/media/base/simd/convert_rgb_to_yuv_sse2.cc
deleted file mode 100644
index f99a2fe..0000000
--- a/src/media/base/simd/convert_rgb_to_yuv_sse2.cc
+++ /dev/null
@@ -1,397 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "build/build_config.h"
-#include "media/base/simd/convert_rgb_to_yuv.h"
-#include "media/base/simd/yuv_to_rgb_table.h"
-
-#if defined(COMPILER_MSVC)
-#include <intrin.h>
-#else
-#include <mmintrin.h>
-#include <emmintrin.h>
-#endif
-
-namespace media {
-
-#define FIX_SHIFT 12
-#define FIX(x) ((x) * (1 << FIX_SHIFT))
-
-// Define a convenient macro to do static cast.
-#define INT16_FIX(x) static_cast<int16>(FIX(x))
-
-SIMD_ALIGNED(const int16 ConvertRGBAToYUV_kTable[8 * 3]) = {
- INT16_FIX(0.098), INT16_FIX(0.504), INT16_FIX(0.257), 0,
- INT16_FIX(0.098), INT16_FIX(0.504), INT16_FIX(0.257), 0,
- INT16_FIX(0.439), -INT16_FIX(0.291), -INT16_FIX(0.148), 0,
- INT16_FIX(0.439), -INT16_FIX(0.291), -INT16_FIX(0.148), 0,
- -INT16_FIX(0.071), -INT16_FIX(0.368), INT16_FIX(0.439), 0,
- -INT16_FIX(0.071), -INT16_FIX(0.368), INT16_FIX(0.439), 0,
-};
-
-#undef INT16_FIX
-
-// This is the final offset for the conversion from signed yuv values to
-// unsigned values. It is arranged so that offset of 16 is applied to Y
-// components and 128 is added to UV components for 2 pixels.
-SIMD_ALIGNED(const int32 kYOffset[4]) = {16, 16, 16, 16};
-
-static inline int Clamp(int value) {
- if (value < 0)
- return 0;
- if (value > 255)
- return 255;
- return value;
-}
-
-static inline int RGBToY(int r, int g, int b) {
- int y = ConvertRGBAToYUV_kTable[0] * b +
- ConvertRGBAToYUV_kTable[1] * g +
- ConvertRGBAToYUV_kTable[2] * r;
- y >>= FIX_SHIFT;
- return Clamp(y + 16);
-}
-
-static inline int RGBToU(int r, int g, int b, int shift) {
- int u = ConvertRGBAToYUV_kTable[8] * b +
- ConvertRGBAToYUV_kTable[9] * g +
- ConvertRGBAToYUV_kTable[10] * r;
- u >>= FIX_SHIFT + shift;
- return Clamp(u + 128);
-}
-
-static inline int RGBToV(int r, int g, int b, int shift) {
- int v = ConvertRGBAToYUV_kTable[16] * b +
- ConvertRGBAToYUV_kTable[17] * g +
- ConvertRGBAToYUV_kTable[18] * r;
- v >>= FIX_SHIFT + shift;
- return Clamp(v + 128);
-}
-
-#define CONVERT_Y(rgb_buf, y_buf) \
- b = *rgb_buf++; \
- g = *rgb_buf++; \
- r = *rgb_buf++; \
- ++rgb_buf; \
- sum_b += b; \
- sum_g += g; \
- sum_r += r; \
- *y_buf++ = RGBToY(r, g, b);
-
-static inline void ConvertRGBToYUV_V2H2(const uint8* rgb_buf_1,
- const uint8* rgb_buf_2,
- uint8* y_buf_1,
- uint8* y_buf_2,
- uint8* u_buf,
- uint8* v_buf) {
- int sum_b = 0;
- int sum_g = 0;
- int sum_r = 0;
- int r, g, b;
-
- CONVERT_Y(rgb_buf_1, y_buf_1);
- CONVERT_Y(rgb_buf_1, y_buf_1);
- CONVERT_Y(rgb_buf_2, y_buf_2);
- CONVERT_Y(rgb_buf_2, y_buf_2);
- *u_buf++ = RGBToU(sum_r, sum_g, sum_b, 2);
- *v_buf++ = RGBToV(sum_r, sum_g, sum_b, 2);
-}
-
-static inline void ConvertRGBToYUV_V2H1(const uint8* rgb_buf_1,
- const uint8* rgb_buf_2,
- uint8* y_buf_1,
- uint8* y_buf_2,
- uint8* u_buf,
- uint8* v_buf) {
- int sum_b = 0;
- int sum_g = 0;
- int sum_r = 0;
- int r, g, b;
-
- CONVERT_Y(rgb_buf_1, y_buf_1);
- CONVERT_Y(rgb_buf_2, y_buf_2);
- *u_buf++ = RGBToU(sum_r, sum_g, sum_b, 1);
- *v_buf++ = RGBToV(sum_r, sum_g, sum_b, 1);
-}
-
-static inline void ConvertRGBToYUV_V1H2(const uint8* rgb_buf,
- uint8* y_buf,
- uint8* u_buf,
- uint8* v_buf) {
- int sum_b = 0;
- int sum_g = 0;
- int sum_r = 0;
- int r, g, b;
-
- CONVERT_Y(rgb_buf, y_buf);
- CONVERT_Y(rgb_buf, y_buf);
- *u_buf++ = RGBToU(sum_r, sum_g, sum_b, 1);
- *v_buf++ = RGBToV(sum_r, sum_g, sum_b, 1);
-}
-
-static inline void ConvertRGBToYUV_V1H1(const uint8* rgb_buf,
- uint8* y_buf,
- uint8* u_buf,
- uint8* v_buf) {
- int sum_b = 0;
- int sum_g = 0;
- int sum_r = 0;
- int r, g, b;
-
- CONVERT_Y(rgb_buf, y_buf);
- *u_buf++ = RGBToU(r, g, b, 0);
- *v_buf++ = RGBToV(r, g, b, 0);
-}
-
-static void ConvertRGB32ToYUVRow_SSE2(const uint8* rgb_buf_1,
- const uint8* rgb_buf_2,
- uint8* y_buf_1,
- uint8* y_buf_2,
- uint8* u_buf,
- uint8* v_buf,
- int width) {
- while (width >= 4) {
- // Name for the Y pixels:
- // Row 1: a b c d
- // Row 2: e f g h
- //
- // First row 4 pixels.
- __m128i rgb_row_1 = _mm_loadu_si128(
- reinterpret_cast<const __m128i*>(rgb_buf_1));
- __m128i zero_1 = _mm_xor_si128(rgb_row_1, rgb_row_1);
-
- __m128i y_table = _mm_load_si128(
- reinterpret_cast<const __m128i*>(ConvertRGBAToYUV_kTable));
-
- __m128i rgb_a_b = _mm_unpackhi_epi8(rgb_row_1, zero_1);
- rgb_a_b = _mm_madd_epi16(rgb_a_b, y_table);
-
- __m128i rgb_c_d = _mm_unpacklo_epi8(rgb_row_1, zero_1);
- rgb_c_d = _mm_madd_epi16(rgb_c_d, y_table);
-
-    // Do a crazy shuffle so that we get:
- // v------------ Multiply Add
- // BG: a b c d
- // A0: a b c d
- __m128i bg_abcd = _mm_castps_si128(
- _mm_shuffle_ps(
- _mm_castsi128_ps(rgb_c_d),
- _mm_castsi128_ps(rgb_a_b),
- (3 << 6) | (1 << 4) | (3 << 2) | 1));
- __m128i r_abcd = _mm_castps_si128(
- _mm_shuffle_ps(
- _mm_castsi128_ps(rgb_c_d),
- _mm_castsi128_ps(rgb_a_b),
- (2 << 6) | (2 << 2)));
- __m128i y_abcd = _mm_add_epi32(bg_abcd, r_abcd);
-
-    // Shift back down to the 8-bit range.
- __m128i y_offset = _mm_load_si128(
- reinterpret_cast<const __m128i*>(kYOffset));
- y_abcd = _mm_srai_epi32(y_abcd, FIX_SHIFT);
- y_abcd = _mm_add_epi32(y_abcd, y_offset);
- y_abcd = _mm_packs_epi32(y_abcd, y_abcd);
- y_abcd = _mm_packus_epi16(y_abcd, y_abcd);
- *reinterpret_cast<uint32*>(y_buf_1) = _mm_cvtsi128_si32(y_abcd);
- y_buf_1 += 4;
-
- // Second row 4 pixels.
- __m128i rgb_row_2 = _mm_loadu_si128(
- reinterpret_cast<const __m128i*>(rgb_buf_2));
- __m128i zero_2 = _mm_xor_si128(rgb_row_2, rgb_row_2);
- __m128i rgb_e_f = _mm_unpackhi_epi8(rgb_row_2, zero_2);
- __m128i rgb_g_h = _mm_unpacklo_epi8(rgb_row_2, zero_2);
-
- // Add two rows together.
- __m128i rgb_ae_bf =
- _mm_add_epi16(_mm_unpackhi_epi8(rgb_row_1, zero_2), rgb_e_f);
- __m128i rgb_cg_dh =
- _mm_add_epi16(_mm_unpacklo_epi8(rgb_row_1, zero_2), rgb_g_h);
-
- // Multiply add like the previous row.
- rgb_e_f = _mm_madd_epi16(rgb_e_f, y_table);
- rgb_g_h = _mm_madd_epi16(rgb_g_h, y_table);
-
- __m128i bg_efgh = _mm_castps_si128(
- _mm_shuffle_ps(_mm_castsi128_ps(rgb_g_h),
- _mm_castsi128_ps(rgb_e_f),
- (3 << 6) | (1 << 4) | (3 << 2) | 1));
- __m128i r_efgh = _mm_castps_si128(
- _mm_shuffle_ps(_mm_castsi128_ps(rgb_g_h),
- _mm_castsi128_ps(rgb_e_f),
- (2 << 6) | (2 << 2)));
- __m128i y_efgh = _mm_add_epi32(bg_efgh, r_efgh);
- y_efgh = _mm_srai_epi32(y_efgh, FIX_SHIFT);
- y_efgh = _mm_add_epi32(y_efgh, y_offset);
- y_efgh = _mm_packs_epi32(y_efgh, y_efgh);
- y_efgh = _mm_packus_epi16(y_efgh, y_efgh);
- *reinterpret_cast<uint32*>(y_buf_2) = _mm_cvtsi128_si32(y_efgh);
- y_buf_2 += 4;
-
- __m128i rgb_ae_cg = _mm_castps_si128(
- _mm_shuffle_ps(_mm_castsi128_ps(rgb_cg_dh),
- _mm_castsi128_ps(rgb_ae_bf),
- (3 << 6) | (2 << 4) | (3 << 2) | 2));
- __m128i rgb_bf_dh = _mm_castps_si128(
- _mm_shuffle_ps(_mm_castsi128_ps(rgb_cg_dh),
- _mm_castsi128_ps(rgb_ae_bf),
- (1 << 6) | (1 << 2)));
-
- // This is a 2x2 subsampling for 2 pixels.
- __m128i rgb_abef_cdgh = _mm_add_epi16(rgb_ae_cg, rgb_bf_dh);
-
- // Do a multiply add with U table.
- __m128i u_a_b = _mm_madd_epi16(
- rgb_abef_cdgh,
- _mm_load_si128(
- reinterpret_cast<const __m128i*>(ConvertRGBAToYUV_kTable + 8)));
- u_a_b = _mm_add_epi32(_mm_shuffle_epi32(u_a_b, ((3 << 2) | 1)),
- _mm_shuffle_epi32(u_a_b, (2 << 2)));
- // Right shift 14 because of 12 from fixed point and 2 from subsampling.
- u_a_b = _mm_srai_epi32(u_a_b, FIX_SHIFT + 2);
- __m128i uv_offset = _mm_slli_epi32(y_offset, 3);
- u_a_b = _mm_add_epi32(u_a_b, uv_offset);
- u_a_b = _mm_packs_epi32(u_a_b, u_a_b);
- u_a_b = _mm_packus_epi16(u_a_b, u_a_b);
- *reinterpret_cast<uint16*>(u_buf) = _mm_extract_epi16(u_a_b, 0);
- u_buf += 2;
-
- __m128i v_a_b = _mm_madd_epi16(
- rgb_abef_cdgh,
- _mm_load_si128(
- reinterpret_cast<const __m128i*>(ConvertRGBAToYUV_kTable + 16)));
- v_a_b = _mm_add_epi32(_mm_shuffle_epi32(v_a_b, ((3 << 2) | 1)),
- _mm_shuffle_epi32(v_a_b, (2 << 2)));
- v_a_b = _mm_srai_epi32(v_a_b, FIX_SHIFT + 2);
- v_a_b = _mm_add_epi32(v_a_b, uv_offset);
- v_a_b = _mm_packs_epi32(v_a_b, v_a_b);
- v_a_b = _mm_packus_epi16(v_a_b, v_a_b);
- *reinterpret_cast<uint16*>(v_buf) = _mm_extract_epi16(v_a_b, 0);
- v_buf += 2;
-
- rgb_buf_1 += 16;
- rgb_buf_2 += 16;
-
- // Move forward by 4 pixels.
- width -= 4;
- }
-
- // Just use C code to convert the remaining pixels.
- if (width >= 2) {
- ConvertRGBToYUV_V2H2(rgb_buf_1, rgb_buf_2, y_buf_1, y_buf_2, u_buf, v_buf);
- rgb_buf_1 += 8;
- rgb_buf_2 += 8;
- y_buf_1 += 2;
- y_buf_2 += 2;
- ++u_buf;
- ++v_buf;
- width -= 2;
- }
-
- if (width)
- ConvertRGBToYUV_V2H1(rgb_buf_1, rgb_buf_2, y_buf_1, y_buf_2, u_buf, v_buf);
-}
-
-extern void ConvertRGB32ToYUV_SSE2(const uint8* rgbframe,
- uint8* yplane,
- uint8* uplane,
- uint8* vplane,
- int width,
- int height,
- int rgbstride,
- int ystride,
- int uvstride) {
- while (height >= 2) {
- ConvertRGB32ToYUVRow_SSE2(rgbframe,
- rgbframe + rgbstride,
- yplane,
- yplane + ystride,
- uplane,
- vplane,
- width);
- rgbframe += 2 * rgbstride;
- yplane += 2 * ystride;
- uplane += uvstride;
- vplane += uvstride;
- height -= 2;
- }
-
- if (!height)
- return;
-
- // Handle the last row.
- while (width >= 2) {
- ConvertRGBToYUV_V1H2(rgbframe, yplane, uplane, vplane);
- rgbframe += 8;
- yplane += 2;
- ++uplane;
- ++vplane;
- width -= 2;
- }
-
- if (width)
- ConvertRGBToYUV_V1H1(rgbframe, yplane, uplane, vplane);
-}
-
-void ConvertRGB32ToYUV_SSE2_Reference(const uint8* rgbframe,
- uint8* yplane,
- uint8* uplane,
- uint8* vplane,
- int width,
- int height,
- int rgbstride,
- int ystride,
- int uvstride) {
- while (height >= 2) {
- int i = 0;
-
- // Convert a 2x2 block.
- while (i + 2 <= width) {
- ConvertRGBToYUV_V2H2(rgbframe + i * 4,
- rgbframe + rgbstride + i * 4,
- yplane + i,
- yplane + ystride + i,
- uplane + i / 2,
- vplane + i / 2);
- i += 2;
- }
-
- // Convert the last pixel of two rows.
- if (i < width) {
- ConvertRGBToYUV_V2H1(rgbframe + i * 4,
- rgbframe + rgbstride + i * 4,
- yplane + i,
- yplane + ystride + i,
- uplane + i / 2,
- vplane + i / 2);
- }
-
- rgbframe += 2 * rgbstride;
- yplane += 2 * ystride;
- uplane += uvstride;
- vplane += uvstride;
- height -= 2;
- }
-
- if (!height)
- return;
-
- // Handle the last row.
- while (width >= 2) {
- ConvertRGBToYUV_V1H2(rgbframe, yplane, uplane, vplane);
- rgbframe += 8;
- yplane += 2;
- ++uplane;
- ++vplane;
- width -= 2;
- }
-
- // Handle the last pixel in the last row.
- if (width)
- ConvertRGBToYUV_V1H1(rgbframe, yplane, uplane, vplane);
-}
-
-} // namespace media
diff --git a/src/media/base/simd/convert_rgb_to_yuv_ssse3.asm b/src/media/base/simd/convert_rgb_to_yuv_ssse3.asm
deleted file mode 100644
index f445e98..0000000
--- a/src/media/base/simd/convert_rgb_to_yuv_ssse3.asm
+++ /dev/null
@@ -1,317 +0,0 @@
-; Copyright (c) 2011 The Chromium Authors. All rights reserved.
-; Use of this source code is governed by a BSD-style license that can be
-; found in the LICENSE file.
-
-%include "x86inc.asm"
-
-;
-; This file uses SSE, SSE2, SSE3, and SSSE3, which are supported by all ATOM
-; processors.
-;
- SECTION_TEXT
-  CPU SSE, SSE2, SSE3, SSSE3
-
-;
-; XMM registers representing constants. We must not use these registers as
-; destination operands.
-; for (int i = 0; i < 16; i += 4) {
-; xmm7.b[i] = 25; xmm7.b[i+1] = 2; xmm7.b[i+2] = 66; xmm7.b[i+3] = 0;
-; xmm6.b[i] = 0; xmm6.b[i+1] = 127; xmm6.b[i+2] = 0; xmm6.b[i+3] = 0;
-; xmm5.b[i] = 112; xmm5.b[i+1] = -74; xmm5.b[i+2] = -38; xmm5.b[i+3] = 0;
-; xmm4.b[i] = -18; xmm4.b[i+1] = -94; xmm4.b[i+2] = 112; xmm4.b[i+3] = 0;
-; }
-;
-%define XMM_CONST_Y0 xmm7
-%define XMM_CONST_Y1 xmm6
-%define XMM_CONST_U xmm5
-%define XMM_CONST_V xmm4
-%define XMM_CONST_128 xmm3
-
-;
-; LOAD_XMM %1 (xmm), %2 (imm32)
-; Loads an immediate value to an XMM register.
-; %1.d[0] = %1.d[1] = %1.d[2] = %1.d[3] = %2;
-;
-%macro LOAD_XMM 2
- mov TEMPd, %2
- movd %1, TEMPd
- pshufd %1, %1, 00000000B
-%endmacro
-
-;
-; UNPACKRGB %1 (xmm), %2 (imm8)
-; Unpacks one RGB pixel in the specified XMM register.
-; for (int i = 15; i > %2; --i) %1.b[i] = %1.b[i - 1];
-; %1.b[%2] = 0;
-; for (int i = %2 - 1; i >= 0; --i) %1.b[i] = %1.b[i];
-;
-%macro UNPACKRGB 2
- movdqa xmm1, %1
- psrldq xmm1, %2
- pslldq xmm1, %2
- pxor %1, xmm1
- pslldq xmm1, 1
- por %1, xmm1
-%endmacro
-
-;
-; READ_ARGB %1 (xmm), %2 (imm)
-; Read the specified number of ARGB (or RGB) pixels from the source and store
-; them to the destination xmm register. If the input format is RGB, we read RGB
-; pixels and convert them to ARGB pixels. (For this case, the alpha values of
-; the output pixels become 0.)
-;
-%macro READ_ARGB 2
-
-%if PIXELSIZE == 4
-
- ; Read ARGB pixels from the source. (This macro assumes the input buffer may
- ; not be aligned to a 16-byte boundary.)
-%if %2 == 1
- movd %1, DWORD [ARGBq + WIDTHq * 4 * 2]
-%elif %2 == 2
- movq %1, QWORD [ARGBq + WIDTHq * 4 * 2]
-%elif %2 == 4
- movdqu %1, DQWORD [ARGBq + WIDTHq * 4 * 2]
-%else
-%error unsupported number of pixels.
-%endif
-
-%elif PIXELSIZE == 3
-
- ; Read RGB pixels from the source and convert them to ARGB pixels.
-%if %2 == 1
- ; Read one RGB pixel and convert it to one ARGB pixel.
-  ; Save the WIDTH register to xmm1. (This macro needs to clobber it.)
- MOVq xmm1, WIDTHq
-
-  ; Read three bytes from the source into TEMPd, then copy them to the
-  ; destination xmm register.
- lea WIDTHq, [WIDTHq + WIDTHq * 2]
- movzx TEMPd, BYTE [ARGBq + WIDTHq * 2 + 2]
- shl TEMPd, 16
- mov TEMPw, WORD [ARGBq + WIDTHq * 2]
- movd %1, TEMPd
-
- ; Restore the WIDTH register.
- MOVq WIDTHq, xmm1
-%elif %2 == 2
- ; Read two RGB pixels and convert them to two ARGB pixels.
- ; Read six bytes from the source to the destination xmm register.
- mov TEMPq, WIDTHq
- lea TEMPq, [TEMPq + TEMPq * 2]
- movd %1, DWORD [ARGBq + TEMPq * 2]
- pinsrw %1, WORD [ARGBq + TEMPq * 2 + 4], 3
-
- ; Fill the alpha values of these RGB pixels with 0 and convert them to two
- ; ARGB pixels.
- UNPACKRGB %1, 3
-%elif %2 == 4
- ; Read four RGB pixels and convert them to four ARGB pixels.
- ; Read twelve bytes from the source to the destination xmm register.
- mov TEMPq, WIDTHq
- lea TEMPq, [TEMPq + TEMPq * 2]
- movq %1, QWORD [ARGBq + TEMPq * 2]
- movd xmm1, DWORD [ARGBq + TEMPq * 2 + 8]
- shufps %1, xmm1, 01000100B
-
- ; Fill the alpha values of these RGB pixels with 0 and convert them to four
- ; ARGB pixels.
- UNPACKRGB %1, 3
- UNPACKRGB %1, 4 + 3
- UNPACKRGB %1, 4 + 4 + 3
-%else
-%error unsupported number of pixels.
-%endif
-
-%else
-%error unsupported PIXELSIZE value.
-%endif
-
-%endmacro
-
-;
-; CALC_Y %1 (xmm), %2 (xmm)
-; Calculates four Y values from four ARGB pixels stored in %2.
-; %1.b[0] = ToByte((25 * B(0) + 129 * G(0) + 66 * R(0) + 128) / 256 + 16);
-; %1.b[1] = ToByte((25 * B(1) + 129 * G(1) + 66 * R(1) + 128) / 256 + 16);
-; %1.b[2] = ToByte((25 * B(2) + 129 * G(2) + 66 * R(2) + 128) / 256 + 16);
-; %1.b[3] = ToByte((25 * B(3) + 129 * G(3) + 66 * R(3) + 128) / 256 + 16);
-;
-%macro CALC_Y 2
- ; To avoid signed saturation, we divide this conversion formula into two
- ; formulae and store their results into two XMM registers %1 and xmm2.
- ; %1.w[0] = 25 * %2.b[0] + 2 * %2.b[1] + 66 * %2.b[2] + 0 * %2.b[3];
- ; %1.w[1] = 25 * %2.b[4] + 2 * %2.b[5] + 66 * %2.b[6] + 0 * %2.b[7];
- ; %1.w[2] = 25 * %2.b[8] + 2 * %2.b[9] + 66 * %2.b[10] + 0 * %2.b[11];
- ; %1.w[3] = 25 * %2.b[12] + 2 * %2.b[13] + 66 * %2.b[14] + 0 * %2.b[15];
- ; xmm2.w[0] = 0 * %2.b[0] + 127 * %2.b[1] + 0 * %2.b[2] + 0 * %2.b[3];
- ; xmm2.w[1] = 0 * %2.b[4] + 127 * %2.b[5] + 0 * %2.b[6] + 0 * %2.b[7];
- ; xmm2.w[2] = 0 * %2.b[8] + 127 * %2.b[9] + 0 * %2.b[10] + 0 * %2.b[11];
- ; xmm2.w[3] = 0 * %2.b[12] + 127 * %2.b[13] + 0 * %2.b[14] + 0 * %2.b[15];
- movdqa %1, %2
- pmaddubsw %1, XMM_CONST_Y0
- phaddsw %1, %1
- movdqa xmm2, %2
- pmaddubsw xmm2, XMM_CONST_Y1
- phaddsw xmm2, xmm2
-
- ; %1.b[0] = ToByte((%1.w[0] + xmm2.w[0] + 128) / 256 + 16);
- ; %1.b[1] = ToByte((%1.w[1] + xmm2.w[1] + 128) / 256 + 16);
- ; %1.b[2] = ToByte((%1.w[2] + xmm2.w[2] + 128) / 256 + 16);
- ; %1.b[3] = ToByte((%1.w[3] + xmm2.w[3] + 128) / 256 + 16);
- paddw %1, xmm2
- movdqa xmm2, XMM_CONST_128
- paddw %1, xmm2
- psrlw %1, 8
- psrlw xmm2, 3
- paddw %1, xmm2
- packuswb %1, %1
-%endmacro
-
-;
-; INIT_UV %1 (r32), %2 (reg) %3 (imm)
-;
-%macro INIT_UV 3
-
-%if SUBSAMPLING == 1 && LINE == 1
-%if %3 == 1 || %3 == 2
- movzx %1, BYTE [%2 + WIDTHq]
-%elif %3 == 4
- movzx %1, WORD [%2 + WIDTHq]
-%else
-%error unsupported number of pixels.
-%endif
-%endif
-
-%endmacro
-
-;
-; CALC_UV %1 (xmm), %2 (xmm), %3 (xmm), %4 (r32)
-; Calculates two U (or V) values from four ARGB pixels stored in %2.
-; if %3 == XMM_CONST_U
-; if (SUBSAMPLING) {
-; %1.b[0] = ToByte((112 * B(0) - 74 * G(0) - 38 * R(0) + 128) / 256 + 128);
-; %1.b[0] = ToByte((112 * B(0) - 74 * G(0) - 38 * R(0) + 128) / 256 + 128);
-; %1.b[1] = ToByte((112 * B(2) - 74 * G(2) - 38 * R(2) + 128) / 256 + 128);
-; %1.b[1] = ToByte((112 * B(2) - 74 * G(2) - 38 * R(2) + 128) / 256 + 128);
-; } else {
-; %1.b[0] = ToByte((112 * B(0) - 74 * G(0) - 38 * R(0) + 128) / 256 + 128);
-; %1.b[1] = ToByte((112 * B(2) - 74 * G(2) - 38 * R(2) + 128) / 256 + 128);
-; }
-; if %3 == XMM_CONST_V
-; %1.b[0] = ToByte((-18 * B(0) - 94 * G(0) + 112 * R(0) + 128) / 256 + 128);
-; %1.b[1] = ToByte((-18 * B(2) - 94 * G(2) + 112 * R(2) + 128) / 256 + 128);
-;
-%macro CALC_UV 4
- ; for (int i = 0; i < 4; ++i) {
- ; %1.w[i] = 0;
- ; for (int j = 0; j < 4; ++j)
- ; %1.w[i] += %3.b[i * 4 + j] + %2.b[i * 4 + j];
- ; }
- movdqa %1, %2
- pmaddubsw %1, %3
- phaddsw %1, %1
-
-%if SUBSAMPLING == 1
- ; %1.w[0] = (%1.w[0] + %1.w[1] + 1) / 2;
- ; %1.w[1] = (%1.w[1] + %1.w[0] + 1) / 2;
- ; %1.w[2] = (%1.w[2] + %1.w[3] + 1) / 2;
- ; %1.w[3] = (%1.w[3] + %1.w[2] + 1) / 2;
- pshuflw xmm2, %1, 10110001B
- pavgw %1, xmm2
-%endif
-
- ; %1.b[0] = ToByte((%1.w[0] + 128) / 256 + 128);
- ; %1.b[1] = ToByte((%1.w[2] + 128) / 256 + 128);
- pshuflw %1, %1, 10001000B
- paddw %1, XMM_CONST_128
- psraw %1, 8
- paddw %1, XMM_CONST_128
- packuswb %1, %1
-
-%if SUBSAMPLING == 1 && LINE == 1
- ; %1.b[0] = (%1.b[0] + %3.b[0] + 1) / 2;
- ; %1.b[1] = (%1.b[1] + %3.b[1] + 1) / 2;
- movd xmm2, %4
- pavgb %1, xmm2
-%endif
-%endmacro
-
-;
-; extern "C" void ConvertARGBToYUVRow_SSSE3(const uint8* argb,
-; uint8* y,
-; uint8* u,
-; uint8* v,
-; int width);
-;
-%define SYMBOL ConvertARGBToYUVRow_SSSE3
-%define PIXELSIZE 4
-%define SUBSAMPLING 0
-%define LINE 0
-%include "convert_rgb_to_yuv_ssse3.inc"
-
-;
-; extern "C" void ConvertRGBToYUVRow_SSSE3(const uint8* rgb,
-; uint8* y,
-; uint8* u,
-; uint8* v,
-; int width);
-;
-%define SYMBOL ConvertRGBToYUVRow_SSSE3
-%define PIXELSIZE 3
-%define SUBSAMPLING 0
-%define LINE 0
-%include "convert_rgb_to_yuv_ssse3.inc"
-
-;
-; extern "C" void ConvertARGBToYUVEven_SSSE3(const uint8* argb,
-; uint8* y,
-; uint8* u,
-; uint8* v,
-; int width);
-;
-%define SYMBOL ConvertARGBToYUVEven_SSSE3
-%define PIXELSIZE 4
-%define SUBSAMPLING 1
-%define LINE 0
-%include "convert_rgb_to_yuv_ssse3.inc"
-
-;
-; extern "C" void ConvertARGBToYUVOdd_SSSE3(const uint8* argb,
-; uint8* y,
-; uint8* u,
-; uint8* v,
-; int width);
-;
-%define SYMBOL ConvertARGBToYUVOdd_SSSE3
-%define PIXELSIZE 4
-%define SUBSAMPLING 1
-%define LINE 1
-%include "convert_rgb_to_yuv_ssse3.inc"
-
-;
-; extern "C" void ConvertRGBToYUVEven_SSSE3(const uint8* rgb,
-; uint8* y,
-; uint8* u,
-; uint8* v,
-; int width);
-;
-%define SYMBOL ConvertRGBToYUVEven_SSSE3
-%define PIXELSIZE 3
-%define SUBSAMPLING 1
-%define LINE 0
-%include "convert_rgb_to_yuv_ssse3.inc"
-
-;
-; extern "C" void ConvertRGBToYUVOdd_SSSE3(const uint8* rgb,
-; uint8* y,
-; uint8* u,
-; uint8* v,
-; int width);
-;
-%define SYMBOL ConvertRGBToYUVOdd_SSSE3
-%define PIXELSIZE 3
-%define SUBSAMPLING 1
-%define LINE 1
-%include "convert_rgb_to_yuv_ssse3.inc"
diff --git a/src/media/base/simd/convert_rgb_to_yuv_ssse3.cc b/src/media/base/simd/convert_rgb_to_yuv_ssse3.cc
deleted file mode 100644
index e956926..0000000
--- a/src/media/base/simd/convert_rgb_to_yuv_ssse3.cc
+++ /dev/null
@@ -1,64 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "media/base/simd/convert_rgb_to_yuv.h"
-
-#include "build/build_config.h"
-#include "media/base/simd/convert_rgb_to_yuv_ssse3.h"
-
-namespace media {
-
-void ConvertRGB32ToYUV_SSSE3(const uint8* rgbframe,
- uint8* yplane,
- uint8* uplane,
- uint8* vplane,
- int width,
- int height,
- int rgbstride,
- int ystride,
- int uvstride) {
- for (; height >= 2; height -= 2) {
- ConvertARGBToYUVRow_SSSE3(rgbframe, yplane, uplane, vplane, width);
- rgbframe += rgbstride;
- yplane += ystride;
-
- ConvertARGBToYUVRow_SSSE3(rgbframe, yplane, NULL, NULL, width);
- rgbframe += rgbstride;
- yplane += ystride;
-
- uplane += uvstride;
- vplane += uvstride;
- }
-
- if (height)
- ConvertARGBToYUVRow_SSSE3(rgbframe, yplane, uplane, vplane, width);
-}
-
-void ConvertRGB24ToYUV_SSSE3(const uint8* rgbframe,
- uint8* yplane,
- uint8* uplane,
- uint8* vplane,
- int width,
- int height,
- int rgbstride,
- int ystride,
- int uvstride) {
- for (; height >= 2; height -= 2) {
- ConvertRGBToYUVRow_SSSE3(rgbframe, yplane, uplane, vplane, width);
- rgbframe += rgbstride;
- yplane += ystride;
-
- ConvertRGBToYUVRow_SSSE3(rgbframe, yplane, NULL, NULL, width);
- rgbframe += rgbstride;
- yplane += ystride;
-
- uplane += uvstride;
- vplane += uvstride;
- }
-
- if (height)
- ConvertRGBToYUVRow_SSSE3(rgbframe, yplane, uplane, vplane, width);
-}
-
-} // namespace media
diff --git a/src/media/base/simd/convert_rgb_to_yuv_ssse3.h b/src/media/base/simd/convert_rgb_to_yuv_ssse3.h
deleted file mode 100644
index 84557d1..0000000
--- a/src/media/base/simd/convert_rgb_to_yuv_ssse3.h
+++ /dev/null
@@ -1,34 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef MEDIA_BASE_SIMD_CONVERT_RGB_TO_YUV_SSSE3_H_
-#define MEDIA_BASE_SIMD_CONVERT_RGB_TO_YUV_SSSE3_H_
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-// The header file for ASM functions that convert a row of RGB pixels to YUV
-// pixels with SSSE3 instructions so we can call them from C++ code. These
-// functions are implemented in "convert_rgb_to_yuv_ssse3.asm".
-
-// Convert a row of 24-bit RGB pixels to YV12 pixels.
-void ConvertRGBToYUVRow_SSSE3(const uint8* rgb,
- uint8* y,
- uint8* u,
- uint8* v,
- int width);
-
-// Convert a row of 32-bit RGB pixels to YV12 pixels.
-void ConvertARGBToYUVRow_SSSE3(const uint8* argb,
- uint8* y,
- uint8* u,
- uint8* v,
- int width);
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif // MEDIA_BASE_SIMD_CONVERT_RGB_TO_YUV_SSSE3_H_
diff --git a/src/media/base/simd/convert_rgb_to_yuv_ssse3.inc b/src/media/base/simd/convert_rgb_to_yuv_ssse3.inc
deleted file mode 100644
index 35c0db9..0000000
--- a/src/media/base/simd/convert_rgb_to_yuv_ssse3.inc
+++ /dev/null
@@ -1,200 +0,0 @@
-; Copyright (c) 2011 The Chromium Authors. All rights reserved.
-; Use of this source code is governed by a BSD-style license that can be
-; found in the LICENSE file.
-
-;
-; void SYMBOL(const uint8* argb, uint8* y, uint8* u, uint8* v, int width);
-;
-; The main code that converts RGB pixels to YUV pixels. This function roughly
-; consists of three parts: converting one ARGB pixel to YUV pixels, converting
-; two ARGB pixels to YUV pixels, and converting four ARGB pixels to YUV pixels.
-; To write the structure of this function in C, it becomes the snippet listed
-; below.
-;
-; if (width & 1) {
-; --width;
-; // Convert one ARGB pixel to one Y pixel, one U pixel, and one V pixel.
-; }
-;
-; if (width & 2) {
-; width -= 2;
-; // Convert two ARGB pixels to two Y pixels, one U pixel, and one V pixel.
-; }
-;
-; while (width) {
-; width -= 4;
-; // Convert four ARGB pixels to four Y pixels, two U pixels, and two V
-; // pixels.
-; }
-;
- global mangle(SYMBOL) PRIVATE
- align function_align
-
-mangle(SYMBOL):
- %assign stack_offset 0
- PROLOGUE 5, 6, 8, ARGB, Y, U, V, WIDTH, TEMP
-
- ; Initialize constants used in this function. (We use immediates to avoid
- ; dependency onto GOT.)
- LOAD_XMM XMM_CONST_Y0, 0x00420219
- LOAD_XMM XMM_CONST_Y1, 0x00007F00
- LOAD_XMM XMM_CONST_U, 0x00DAB670
- LOAD_XMM XMM_CONST_V, 0x0070A2EE
- LOAD_XMM XMM_CONST_128, 0x00800080
-
-.convert_one_pixel:
- ; Divide the input width by two so it represents the offsets for u[] and v[].
-  ; When the width is odd, we read the rightmost ARGB pixel and convert its
- ; colorspace to YUV. This code stores one Y pixel, one U pixel, and one V
- ; pixel.
- sar WIDTHq, 1
- jnc .convert_two_pixels
-
- ; Read one ARGB (or RGB) pixel.
- READ_ARGB xmm0, 1
-
- ; Calculate y[0] from one RGB pixel read above.
- CALC_Y xmm1, xmm0
- movd TEMPd, xmm1
- mov BYTE [Yq + WIDTHq * 2], TEMPb
-
- ; Calculate u[0] from one RGB pixel read above. If this is an odd line, the
- ; output pixel contains the U value calculated in the previous call. We also
- ; read this pixel and calculate their average.
- INIT_UV TEMPd, Uq, 4
- CALC_UV xmm1, xmm0, XMM_CONST_U, TEMPd
- movd TEMPd, xmm1
- mov BYTE [Uq + WIDTHq], TEMPb
-
- ; Calculate v[0] from one RGB pixel. Same as u[0], we read the result of the
- ; previous call and get their average.
- INIT_UV TEMPd, Uq, 4
- CALC_UV xmm1, xmm0, XMM_CONST_V, TEMPd
- movd TEMPd, xmm1
- mov BYTE [Vq + WIDTHq], TEMPb
-
-.convert_two_pixels:
- ; If the input width is not a multiple of four, read the rightmost two ARGB
- ; pixels and convert their colorspace to YUV. This code stores two Y pixels,
- ; one U pixel, and one V pixel.
- test WIDTHb, 2 / 2
- jz .convert_four_pixels
- sub WIDTHb, 2 / 2
-
- ; Read two ARGB (or RGB) pixels.
- READ_ARGB xmm0, 2
-
-  ; Calculate y[0] and y[1] from two RGB pixels read above.
- CALC_Y xmm1, xmm0
- movd TEMPd, xmm1
- mov WORD [Yq + WIDTHq * 2], TEMPw
-
- ; Skip calculating u and v if the output buffer is NULL.
- test Uq, Uq
- jz .convert_four_pixels
-
- ; Calculate u[0] from two RGB pixels read above. (For details, read the above
- ; comment in .convert_one_pixel).
- INIT_UV TEMPd, Uq, 2
- CALC_UV xmm1, xmm0, XMM_CONST_U, TEMPd
- movd TEMPd, xmm1
- mov BYTE [Uq + WIDTHq], TEMPb
-
- ; Calculate v[0] from two RGB pixels read above.
- INIT_UV TEMPd, Vq, 2
- CALC_UV xmm1, xmm0, XMM_CONST_V, TEMPd
- movd TEMPd, xmm1
- mov BYTE [Vq + WIDTHq], TEMPb
-
-.convert_four_pixels:
- ; Read four ARGB pixels and convert their colorspace to YUV. This code stores
- ; four Y pixels, two U pixels, and two V pixels.
- test WIDTHq, WIDTHq
- jz .convert_finish
-
-%if PIXELSIZE == 4
- ; Check if the input buffer is aligned to a 16-byte boundary and use movdqa
- ; for reading the ARGB pixels.
- test ARGBw, 15
- jnz .convert_four_pixels_unaligned
-
-.convert_four_pixels_aligned:
- sub WIDTHq, 4 / 2
-
- ; Read four ARGB pixels. (We can use movdqa here since we have checked if the
- ; source address is aligned.)
- movdqa xmm0, DQWORD [ARGBq + WIDTHq * 4 * 2]
-
-  ; Calculate y[0], y[1], y[2], and y[3] from the input ARGB pixels.
- CALC_Y xmm1, xmm0
- movd DWORD [Yq + WIDTHq * 2], xmm1
-
-%if SUBSAMPLING == 0
- ; Skip calculating u and v if the output buffer is NULL, which means we are
- ; converting an odd line. (When we enable subsampling, these buffers must
- ; contain the u and v values for the previous call, i.e. these variables must
- ; not be NULL.)
- test Uq, Uq
- jz .convert_four_pixels_aligned_next
-%endif
-
- ; Calculate u[0] and u[1] from four ARGB pixels read above.
- INIT_UV TEMPd, Uq, 4
- CALC_UV xmm1, xmm0, XMM_CONST_U, TEMPd
- movd TEMPd, xmm1
- mov WORD [Uq + WIDTHq], TEMPw
-
- ; Calculate v[0] and v[1] from four ARGB pixels read above.
- INIT_UV TEMPd, Vq, 4
- CALC_UV xmm1, xmm0, XMM_CONST_V, TEMPd
- movd TEMPd, xmm1
- mov WORD [Vq + WIDTHq], TEMPw
-
-%if SUBSAMPLING == 0
-.convert_four_pixels_aligned_next:
-%endif
-
- test WIDTHq, WIDTHq
- jnz .convert_four_pixels_aligned
-
- jmp .convert_finish
-%endif
-
-.convert_four_pixels_unaligned:
- sub WIDTHq, 4 / 2
-
- ; Read four ARGB (or RGB) pixels.
- READ_ARGB xmm0, 4
-
-  ; Calculate y[0], y[1], y[2], and y[3] from the input ARGB pixels.
- CALC_Y xmm1, xmm0
- movd DWORD [Yq + WIDTHq * 2], xmm1
-
-%if SUBSAMPLING == 0
- ; Skip calculating u and v if the output buffer is NULL.
- test Uq, Uq
- jz .convert_four_pixels_unaligned_next
-%endif
-
- ; Calculate u[0] and u[1] from the input ARGB pixels.
- INIT_UV TEMPd, Uq, 4
- CALC_UV xmm1, xmm0, XMM_CONST_U, TEMPd
- movd TEMPd, xmm1
- mov WORD [Uq + WIDTHq], TEMPw
-
- ; Calculate v[0] and v[1] from the input ARGB pixels.
- INIT_UV TEMPd, Vq, 4
- CALC_UV xmm1, xmm0, XMM_CONST_V, TEMPd
- movd TEMPd, xmm1
- mov WORD [Vq + WIDTHq], TEMPw
-
-%if SUBSAMPLING == 0
-.convert_four_pixels_unaligned_next:
-%endif
-
- test WIDTHq, WIDTHq
- jnz .convert_four_pixels_unaligned
-
-.convert_finish:
- ; Just exit this function since this is a void function.
- RET
diff --git a/src/media/base/simd/convert_rgb_to_yuv_unittest.cc b/src/media/base/simd/convert_rgb_to_yuv_unittest.cc
deleted file mode 100644
index 08aa4df..0000000
--- a/src/media/base/simd/convert_rgb_to_yuv_unittest.cc
+++ /dev/null
@@ -1,107 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/cpu.h"
-#include "base/memory/scoped_ptr.h"
-#include "media/base/simd/convert_rgb_to_yuv.h"
-#include "testing/gtest/include/gtest/gtest.h"
-
-namespace {
-
-// Reference code that converts RGB pixels to YUV pixels.
-int ConvertRGBToY(const uint8* rgb) {
- int y = 25 * rgb[0] + 129 * rgb[1] + 66 * rgb[2];
- y = ((y + 128) >> 8) + 16;
- return std::max(0, std::min(255, y));
-}
-
-int ConvertRGBToU(const uint8* rgb, int size) {
- int u = 112 * rgb[0] - 74 * rgb[1] - 38 * rgb[2];
- u = ((u + 128) >> 8) + 128;
- return std::max(0, std::min(255, u));
-}
-
-int ConvertRGBToV(const uint8* rgb, int size) {
- int v = -18 * rgb[0] - 94 * rgb[1] + 112 * rgb[2];
- v = ((v + 128) >> 8) + 128;
- return std::max(0, std::min(255, v));
-}
-
-} // namespace
-
-// A side-by-side test that verifies our ASM functions that convert RGB pixels
-// to YUV pixels can output the expected results. This test converts RGB pixels
-// to YUV pixels with our ASM functions (which use SSE, SSE2, SSE3, and SSSE3)
-// and compares the output YUV pixels with the ones calculated with our reference
-// functions implemented in C++.
-TEST(YUVConvertTest, SideBySideRGB) {
-  // We skip this test on PCs that do not support SSSE3 because this test
-  // needs it.
- base::CPU cpu;
- if (!cpu.has_ssse3())
- return;
-
-  // This test checks a subset of all RGB values so that it does not take too
-  // long.
- const int kStep = 8;
- const int kWidth = 256 / kStep;
-
- for (int size = 3; size <= 4; ++size) {
- // Create the output buffers.
- scoped_array<uint8> rgb(new uint8[kWidth * size]);
- scoped_array<uint8> y(new uint8[kWidth]);
- scoped_array<uint8> u(new uint8[kWidth / 2]);
- scoped_array<uint8> v(new uint8[kWidth / 2]);
-
- // Choose the function that converts from RGB pixels to YUV ones.
- void (*convert)(const uint8*, uint8*, uint8*, uint8*,
- int, int, int, int, int) = NULL;
- if (size == 3)
- convert = media::ConvertRGB24ToYUV_SSSE3;
- else
- convert = media::ConvertRGB32ToYUV_SSSE3;
-
- int total_error = 0;
- for (int r = 0; r < kWidth; ++r) {
- for (int g = 0; g < kWidth; ++g) {
-
- // Fill the input pixels.
- for (int b = 0; b < kWidth; ++b) {
- rgb[b * size + 0] = b * kStep;
- rgb[b * size + 1] = g * kStep;
- rgb[b * size + 2] = r * kStep;
- if (size == 4)
- rgb[b * size + 3] = 255;
- }
-
- // Convert the input RGB pixels to YUV ones.
- convert(rgb.get(), y.get(), u.get(), v.get(), kWidth, 1, kWidth * size,
- kWidth, kWidth / 2);
-
- // Check the output Y pixels.
- for (int i = 0; i < kWidth; ++i) {
- const uint8* p = &rgb[i * size];
- int error = ConvertRGBToY(p) - y[i];
- total_error += error > 0 ? error : -error;
- }
-
- // Check the output U pixels.
- for (int i = 0; i < kWidth / 2; ++i) {
- const uint8* p = &rgb[i * 2 * size];
- int error = ConvertRGBToU(p, size) - u[i];
- total_error += error > 0 ? error : -error;
- }
-
- // Check the output V pixels.
- for (int i = 0; i < kWidth / 2; ++i) {
- const uint8* p = &rgb[i * 2 * size];
- int error = ConvertRGBToV(p, size) - v[i];
- total_error += error > 0 ? error : -error;
- }
- }
- }
-
- EXPECT_EQ(0, total_error);
- }
-}
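Note: the base::CPU check at the top of the deleted test also documents how callers were expected to pick a conversion path at runtime. A hedged dispatch sketch follows; ConvertRGB32ToYUVDispatch is an invented name, not the real dispatcher in yuv_convert.

    #include "base/cpu.h"
    #include "media/base/simd/convert_rgb_to_yuv.h"

    // Picks the SSSE3 path when the CPU supports it, otherwise the C fallback.
    void ConvertRGB32ToYUVDispatch(const uint8* rgbframe, uint8* yplane,
                                   uint8* uplane, uint8* vplane, int width,
                                   int height, int rgbstride, int ystride,
                                   int uvstride) {
      base::CPU cpu;
      if (cpu.has_ssse3()) {
        media::ConvertRGB32ToYUV_SSSE3(rgbframe, yplane, uplane, vplane, width,
                                       height, rgbstride, ystride, uvstride);
      } else {
        media::ConvertRGB32ToYUV_C(rgbframe, yplane, uplane, vplane, width,
                                   height, rgbstride, ystride, uvstride);
      }
    }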
diff --git a/src/media/base/simd/convert_yuv_to_rgb.h b/src/media/base/simd/convert_yuv_to_rgb.h
deleted file mode 100644
index 164ad11..0000000
--- a/src/media/base/simd/convert_yuv_to_rgb.h
+++ /dev/null
@@ -1,158 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef MEDIA_BASE_SIMD_CONVERT_YUV_TO_RGB_H_
-#define MEDIA_BASE_SIMD_CONVERT_YUV_TO_RGB_H_
-
-#include "base/basictypes.h"
-#include "media/base/yuv_convert.h"
-
-namespace media {
-
-typedef void (*ConvertYUVToRGB32Proc)(const uint8*,
- const uint8*,
- const uint8*,
- uint8*,
- int,
- int,
- int,
- int,
- int,
- YUVType);
-
-void ConvertYUVToRGB32_C(const uint8* yplane,
- const uint8* uplane,
- const uint8* vplane,
- uint8* rgbframe,
- int width,
- int height,
- int ystride,
- int uvstride,
- int rgbstride,
- YUVType yuv_type);
-
-void ConvertYUVToRGB32_SSE(const uint8* yplane,
- const uint8* uplane,
- const uint8* vplane,
- uint8* rgbframe,
- int width,
- int height,
- int ystride,
- int uvstride,
- int rgbstride,
- YUVType yuv_type);
-
-void ConvertYUVToRGB32_MMX(const uint8* yplane,
- const uint8* uplane,
- const uint8* vplane,
- uint8* rgbframe,
- int width,
- int height,
- int ystride,
- int uvstride,
- int rgbstride,
- YUVType yuv_type);
-
-} // namespace media
-
-// Assembly functions are declared without namespace.
-extern "C" {
-
-typedef void (*ConvertYUVToRGB32RowProc)(const uint8*,
- const uint8*,
- const uint8*,
- uint8*,
- int);
-typedef void (*ScaleYUVToRGB32RowProc)(const uint8*,
- const uint8*,
- const uint8*,
- uint8*,
- int,
- int);
-
-void ConvertYUVToRGB32Row_C(const uint8* yplane,
- const uint8* uplane,
- const uint8* vplane,
- uint8* rgbframe,
- int width);
-
-void ConvertYUVToRGB32Row_MMX(const uint8* yplane,
- const uint8* uplane,
- const uint8* vplane,
- uint8* rgbframe,
- int width);
-
-void ConvertYUVToRGB32Row_SSE(const uint8* yplane,
- const uint8* uplane,
- const uint8* vplane,
- uint8* rgbframe,
- int width);
-
-void ScaleYUVToRGB32Row_C(const uint8* y_buf,
- const uint8* u_buf,
- const uint8* v_buf,
- uint8* rgb_buf,
- int width,
- int source_dx);
-
-void ScaleYUVToRGB32Row_MMX(const uint8* y_buf,
- const uint8* u_buf,
- const uint8* v_buf,
- uint8* rgb_buf,
- int width,
- int source_dx);
-
-void ScaleYUVToRGB32Row_SSE(const uint8* y_buf,
- const uint8* u_buf,
- const uint8* v_buf,
- uint8* rgb_buf,
- int width,
- int source_dx);
-
-void ScaleYUVToRGB32Row_SSE2_X64(const uint8* y_buf,
- const uint8* u_buf,
- const uint8* v_buf,
- uint8* rgb_buf,
- int width,
- int source_dx);
-
-void LinearScaleYUVToRGB32Row_C(const uint8* y_buf,
- const uint8* u_buf,
- const uint8* v_buf,
- uint8* rgb_buf,
- int width,
- int source_dx);
-
-void LinearScaleYUVToRGB32RowWithRange_C(const uint8* y_buf,
- const uint8* u_buf,
- const uint8* v_buf,
- uint8* rgb_buf,
- int dest_width,
- int source_x,
- int source_dx);
-
-void LinearScaleYUVToRGB32Row_MMX(const uint8* y_buf,
- const uint8* u_buf,
- const uint8* v_buf,
- uint8* rgb_buf,
- int width,
- int source_dx);
-
-void LinearScaleYUVToRGB32Row_SSE(const uint8* y_buf,
- const uint8* u_buf,
- const uint8* v_buf,
- uint8* rgb_buf,
- int width,
- int source_dx);
-
-void LinearScaleYUVToRGB32Row_MMX_X64(const uint8* y_buf,
- const uint8* u_buf,
- const uint8* v_buf,
- uint8* rgb_buf,
- int width,
- int source_dx);
-
-} // extern "C"
-
-#endif // MEDIA_BASE_SIMD_CONVERT_YUV_TO_RGB_H_
diff --git a/src/media/base/simd/convert_yuv_to_rgb_c.cc b/src/media/base/simd/convert_yuv_to_rgb_c.cc
deleted file mode 100644
index db6e557..0000000
--- a/src/media/base/simd/convert_yuv_to_rgb_c.cc
+++ /dev/null
@@ -1,164 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "media/base/simd/convert_yuv_to_rgb.h"
-#include "media/base/simd/yuv_to_rgb_table.h"
-
-#define packuswb(x) ((x) < 0 ? 0 : ((x) > 255 ? 255 : (x)))
-#define paddsw(x, y) (((x) + (y)) < -32768 ? -32768 : \
- (((x) + (y)) > 32767 ? 32767 : ((x) + (y))))
-
-static inline void ConvertYUVToRGB32_C(uint8 y,
- uint8 u,
- uint8 v,
- uint8* rgb_buf) {
- int b = kCoefficientsRgbY[256+u][0];
- int g = kCoefficientsRgbY[256+u][1];
- int r = kCoefficientsRgbY[256+u][2];
- int a = kCoefficientsRgbY[256+u][3];
-
- b = paddsw(b, kCoefficientsRgbY[512+v][0]);
- g = paddsw(g, kCoefficientsRgbY[512+v][1]);
- r = paddsw(r, kCoefficientsRgbY[512+v][2]);
- a = paddsw(a, kCoefficientsRgbY[512+v][3]);
-
- b = paddsw(b, kCoefficientsRgbY[y][0]);
- g = paddsw(g, kCoefficientsRgbY[y][1]);
- r = paddsw(r, kCoefficientsRgbY[y][2]);
- a = paddsw(a, kCoefficientsRgbY[y][3]);
-
- b >>= 6;
- g >>= 6;
- r >>= 6;
- a >>= 6;
-
- *reinterpret_cast<uint32*>(rgb_buf) = (packuswb(b)) |
- (packuswb(g) << 8) |
- (packuswb(r) << 16) |
- (packuswb(a) << 24);
-}
-
-extern "C" {
-
-void ConvertYUVToRGB32Row_C(const uint8* y_buf,
- const uint8* u_buf,
- const uint8* v_buf,
- uint8* rgb_buf,
- int width) {
- for (int x = 0; x < width; x += 2) {
- uint8 u = u_buf[x >> 1];
- uint8 v = v_buf[x >> 1];
- uint8 y0 = y_buf[x];
- ConvertYUVToRGB32_C(y0, u, v, rgb_buf);
- if ((x + 1) < width) {
- uint8 y1 = y_buf[x + 1];
- ConvertYUVToRGB32_C(y1, u, v, rgb_buf + 4);
- }
- rgb_buf += 8; // Advance 2 pixels.
- }
-}
-
-// 16.16 fixed point is used. A shift by 16 isolates the integer.
-// A shift by 17 is used to further subsample the chrominance channels.
-// & 0xffff isolates the fixed point fraction. >> 2 to get the upper 2 bits,
-// for 1/65536 pixel accurate interpolation.
-void ScaleYUVToRGB32Row_C(const uint8* y_buf,
- const uint8* u_buf,
- const uint8* v_buf,
- uint8* rgb_buf,
- int width,
- int source_dx) {
- int x = 0;
- for (int i = 0; i < width; i += 2) {
- int y = y_buf[x >> 16];
- int u = u_buf[(x >> 17)];
- int v = v_buf[(x >> 17)];
- ConvertYUVToRGB32_C(y, u, v, rgb_buf);
- x += source_dx;
- if ((i + 1) < width) {
- y = y_buf[x >> 16];
- ConvertYUVToRGB32_C(y, u, v, rgb_buf+4);
- x += source_dx;
- }
- rgb_buf += 8;
- }
-}
-
-void LinearScaleYUVToRGB32Row_C(const uint8* y_buf,
- const uint8* u_buf,
- const uint8* v_buf,
- uint8* rgb_buf,
- int width,
- int source_dx) {
- // Avoid point-sampling for down-scaling by > 2:1.
- int source_x = 0;
- if (source_dx >= 0x20000)
- source_x += 0x8000;
- LinearScaleYUVToRGB32RowWithRange_C(y_buf, u_buf, v_buf, rgb_buf, width,
- source_x, source_dx);
-}
-
-void LinearScaleYUVToRGB32RowWithRange_C(const uint8* y_buf,
- const uint8* u_buf,
- const uint8* v_buf,
- uint8* rgb_buf,
- int dest_width,
- int x,
- int source_dx) {
- for (int i = 0; i < dest_width; i += 2) {
- int y0 = y_buf[x >> 16];
- int y1 = y_buf[(x >> 16) + 1];
- int u0 = u_buf[(x >> 17)];
- int u1 = u_buf[(x >> 17) + 1];
- int v0 = v_buf[(x >> 17)];
- int v1 = v_buf[(x >> 17) + 1];
- int y_frac = (x & 65535);
- int uv_frac = ((x >> 1) & 65535);
- int y = (y_frac * y1 + (y_frac ^ 65535) * y0) >> 16;
- int u = (uv_frac * u1 + (uv_frac ^ 65535) * u0) >> 16;
- int v = (uv_frac * v1 + (uv_frac ^ 65535) * v0) >> 16;
- ConvertYUVToRGB32_C(y, u, v, rgb_buf);
- x += source_dx;
- if ((i + 1) < dest_width) {
- y0 = y_buf[x >> 16];
- y1 = y_buf[(x >> 16) + 1];
- y_frac = (x & 65535);
- y = (y_frac * y1 + (y_frac ^ 65535) * y0) >> 16;
- ConvertYUVToRGB32_C(y, u, v, rgb_buf+4);
- x += source_dx;
- }
- rgb_buf += 8;
- }
-}
-
-}
-
-namespace media {
-
-void ConvertYUVToRGB32_C(const uint8* yplane,
- const uint8* uplane,
- const uint8* vplane,
- uint8* rgbframe,
- int width,
- int height,
- int ystride,
- int uvstride,
- int rgbstride,
- YUVType yuv_type) {
- unsigned int y_shift = yuv_type;
- for (int y = 0; y < height; ++y) {
- uint8* rgb_row = rgbframe + y * rgbstride;
- const uint8* y_ptr = yplane + y * ystride;
- const uint8* u_ptr = uplane + (y >> y_shift) * uvstride;
- const uint8* v_ptr = vplane + (y >> y_shift) * uvstride;
-
- ConvertYUVToRGB32Row_C(y_ptr,
- u_ptr,
- v_ptr,
- rgb_row,
- width);
- }
-}
-
-} // namespace media
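The fixed-point comments in the deleted ScaleYUVToRGB32Row_C above are easier to follow with a worked example. A minimal standalone sketch (not part of the original tree; values chosen arbitrarily) of how a 16.16 source position decodes into luma/chroma indices and an interpolation fraction:

// Illustration only: decoding a 16.16 fixed-point source position the same
// way the deleted row converters do (x >> 16 for luma, x >> 17 for the
// horizontally subsampled chroma, x & 0xffff for the blend fraction).
#include <cstdint>
#include <cstdio>

int main() {
  uint32_t source_dx = 0x18000;  // 1.5 source pixels per destination pixel
  uint32_t x = 3 * source_dx;    // position of the 4th destination pixel
  int y_index = x >> 16;         // integer luma sample index (4)
  int uv_index = x >> 17;        // chroma sample index (2)
  int y_frac = x & 0xffff;       // fraction used by the linear-scale rows
  std::printf("y_index=%d uv_index=%d y_frac=%d/65536\n",
              y_index, uv_index, y_frac);
  return 0;
}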
diff --git a/src/media/base/simd/convert_yuv_to_rgb_mmx.asm b/src/media/base/simd/convert_yuv_to_rgb_mmx.asm
deleted file mode 100644
index e044474..0000000
--- a/src/media/base/simd/convert_yuv_to_rgb_mmx.asm
+++ /dev/null
@@ -1,22 +0,0 @@
-; Copyright (c) 2011 The Chromium Authors. All rights reserved.
-; Use of this source code is governed by a BSD-style license that can be
-; found in the LICENSE file.
-
-%include "x86inc.asm"
-
-;
-; This file uses MMX instructions.
-;
- SECTION_TEXT
- CPU MMX
-
-; Use movq to save the output.
-%define MOVQ movq
-
-; extern "C" void ConvertYUVToRGB32Row_MMX(const uint8* y_buf,
-; const uint8* u_buf,
-; const uint8* v_buf,
-; uint8* rgb_buf,
-; int width);
-%define SYMBOL ConvertYUVToRGB32Row_MMX
-%include "convert_yuv_to_rgb_mmx.inc"
diff --git a/src/media/base/simd/convert_yuv_to_rgb_mmx.inc b/src/media/base/simd/convert_yuv_to_rgb_mmx.inc
deleted file mode 100644
index b9555ce..0000000
--- a/src/media/base/simd/convert_yuv_to_rgb_mmx.inc
+++ /dev/null
@@ -1,119 +0,0 @@
-; Copyright (c) 2011 The Chromium Authors. All rights reserved.
-; Use of this source code is governed by a BSD-style license that can be
-; found in the LICENSE file.
-
- global mangle(SYMBOL) PRIVATE
- align function_align
-
-; Non-PIC code is the fastest, so use it if possible.
-%ifndef PIC
-mangle(SYMBOL):
- %assign stack_offset 0
- PROLOGUE 5, 7, 3, Y, U, V, ARGB, WIDTH, TEMPU, TEMPV
- extern mangle(kCoefficientsRgbY)
- jmp .convertend
-
-.convertloop:
- movzx TEMPUd, BYTE [Uq]
- add Uq, 1
- movzx TEMPVd, BYTE [Vq]
- add Vq, 1
- movq mm0, [mangle(kCoefficientsRgbY) + 2048 + 8 * TEMPUq]
- movzx TEMPUd, BYTE [Yq]
- paddsw mm0, [mangle(kCoefficientsRgbY) + 4096 + 8 * TEMPVq]
- movzx TEMPVd, BYTE [Yq + 1]
- movq mm1, [mangle(kCoefficientsRgbY) + 8 * TEMPUq]
- add Yq, 2
- movq mm2, [mangle(kCoefficientsRgbY) + 8 * TEMPVq]
- paddsw mm1, mm0
- paddsw mm2, mm0
- psraw mm1, 6
- psraw mm2, 6
- packuswb mm1, mm2
- MOVQ [ARGBq], mm1
- add ARGBq, 8
-
-.convertend:
- sub WIDTHq, 2
- jns .convertloop
-
- ; If the number of pixels is odd, compute the remaining pixel.
- and WIDTHq, 1
- jz .convertdone
-
- movzx TEMPUd, BYTE [Uq]
- movq mm0, [mangle(kCoefficientsRgbY) + 2048 + 8 * TEMPUq]
- movzx TEMPVd, BYTE [Vq]
- paddsw mm0, [mangle(kCoefficientsRgbY) + 4096 + 8 * TEMPVq]
- movzx TEMPUd, BYTE [Yq]
- movq mm1, [mangle(kCoefficientsRgbY) + 8 * TEMPUq]
- paddsw mm1, mm0
- psraw mm1, 6
- packuswb mm1, mm1
- movd [ARGBq], mm1
-
-.convertdone:
- RET
-%endif
-
-; With PIC code we need to load the address of mangle(kCoefficientsRgbY).
-; This code is slower than the above version.
-%ifdef PIC
-mangle(SYMBOL):
- %assign stack_offset 0
- PROLOGUE 5, 7, 3, Y, U, V, ARGB, WIDTH, TEMP, TABLE
-
- extern mangle(kCoefficientsRgbY)
- LOAD_SYM TABLEq, mangle(kCoefficientsRgbY)
-
- jmp .convertend
-
-.convertloop:
- movzx TEMPd, BYTE [Uq]
- movq mm0, [TABLEq + 2048 + 8 * TEMPq]
- add Uq, 1
-
- movzx TEMPd, BYTE [Vq]
- paddsw mm0, [TABLEq + 4096 + 8 * TEMPq]
- add Vq, 1
-
- movzx TEMPd, BYTE [Yq]
- movq mm1, [TABLEq + 8 * TEMPq]
-
- movzx TEMPd, BYTE [Yq + 1]
- movq mm2, [TABLEq + 8 * TEMPq]
- add Yq, 2
-
- ; Add UV components to Y component.
- paddsw mm1, mm0
- paddsw mm2, mm0
-
- ; Down shift and then pack.
- psraw mm1, 6
- psraw mm2, 6
- packuswb mm1, mm2
- MOVQ [ARGBq], mm1
- add ARGBq, 8
-
-.convertend:
- sub WIDTHq, 2
- jns .convertloop
-
- ; If the number of pixels is odd, compute the remaining pixel.
- and WIDTHq, 1
- jz .convertdone
-
- movzx TEMPd, BYTE [Uq]
- movq mm0, [TABLEq + 2048 + 8 * TEMPq]
- movzx TEMPd, BYTE [Vq]
- paddsw mm0, [TABLEq + 4096 + 8 * TEMPq]
- movzx TEMPd, BYTE [Yq]
- movq mm1, [TABLEq + 8 * TEMPq]
- paddsw mm1, mm0
- psraw mm1, 6
- packuswb mm1, mm1
- movd [ARGBq], mm1
-
-.convertdone:
- RET
-%endif
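The +2048 and +4096 offsets in the MMX loops above come from the layout of kCoefficientsRgbY (defined in the yuv_to_rgb_table.cc diff further down): 256 entries of four int16 per component, Y first, then U, then V. A small sketch of that addressing, illustration only:

// Illustration: byte offsets into kCoefficientsRgbY as used by the deleted
// .inc files. Each entry is 4 x int16 = 8 bytes; the Y block is followed by
// the U block (byte 2048) and the V block (byte 4096).
#include <cstdio>

int main() {
  const int kEntryBytes = 8;             // 4 x int16
  const int kUBase = 256 * kEntryBytes;  // 2048
  const int kVBase = 512 * kEntryBytes;  // 4096
  int u = 0x20;
  std::printf("U[0x20] starts at byte %d, V block at %d\n",
              kUBase + kEntryBytes * u, kVBase);  // 2304, 4096
  return 0;
}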
diff --git a/src/media/base/simd/convert_yuv_to_rgb_sse.asm b/src/media/base/simd/convert_yuv_to_rgb_sse.asm
deleted file mode 100644
index 2f1967a..0000000
--- a/src/media/base/simd/convert_yuv_to_rgb_sse.asm
+++ /dev/null
@@ -1,23 +0,0 @@
-; Copyright (c) 2011 The Chromium Authors. All rights reserved.
-; Use of this source code is governed by a BSD-style license that can be
-; found in the LICENSE file.
-
-%include "x86inc.asm"
-
-;
-; This file uses MMX and SSE instructions.
-;
- SECTION_TEXT
- CPU MMX, SSE
-
-; Use the SSE instruction movntq, which writes faster.
-%define MOVQ movntq
-
-;
-; extern "C" void ConvertYUVToRGB32Row_SSE(const uint8* y_buf,
-; const uint8* u_buf,
-; const uint8* v_buf,
-; uint8* rgb_buf,
-; int width);
-%define SYMBOL ConvertYUVToRGB32Row_SSE
-%include "convert_yuv_to_rgb_mmx.inc"
diff --git a/src/media/base/simd/convert_yuv_to_rgb_x86.cc b/src/media/base/simd/convert_yuv_to_rgb_x86.cc
deleted file mode 100644
index 3825bdb..0000000
--- a/src/media/base/simd/convert_yuv_to_rgb_x86.cc
+++ /dev/null
@@ -1,70 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#if defined(_MSC_VER)
-#include <intrin.h>
-#else
-#include <mmintrin.h>
-#endif
-
-#include "media/base/simd/convert_yuv_to_rgb.h"
-#include "media/base/yuv_convert.h"
-
-namespace media {
-
-void ConvertYUVToRGB32_MMX(const uint8* yplane,
- const uint8* uplane,
- const uint8* vplane,
- uint8* rgbframe,
- int width,
- int height,
- int ystride,
- int uvstride,
- int rgbstride,
- YUVType yuv_type) {
- unsigned int y_shift = yuv_type;
- for (int y = 0; y < height; ++y) {
- uint8* rgb_row = rgbframe + y * rgbstride;
- const uint8* y_ptr = yplane + y * ystride;
- const uint8* u_ptr = uplane + (y >> y_shift) * uvstride;
- const uint8* v_ptr = vplane + (y >> y_shift) * uvstride;
-
- ConvertYUVToRGB32Row_MMX(y_ptr,
- u_ptr,
- v_ptr,
- rgb_row,
- width);
- }
-
- _mm_empty();
-}
-
-void ConvertYUVToRGB32_SSE(const uint8* yplane,
- const uint8* uplane,
- const uint8* vplane,
- uint8* rgbframe,
- int width,
- int height,
- int ystride,
- int uvstride,
- int rgbstride,
- YUVType yuv_type) {
- unsigned int y_shift = yuv_type;
- for (int y = 0; y < height; ++y) {
- uint8* rgb_row = rgbframe + y * rgbstride;
- const uint8* y_ptr = yplane + y * ystride;
- const uint8* u_ptr = uplane + (y >> y_shift) * uvstride;
- const uint8* v_ptr = vplane + (y >> y_shift) * uvstride;
-
- ConvertYUVToRGB32Row_SSE(y_ptr,
- u_ptr,
- v_ptr,
- rgb_row,
- width);
- }
-
- _mm_empty();
-}
-
-} // namespace media
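The per-plane loops above advance the chroma pointers by (y >> y_shift) rows, so with vertically subsampled chroma each chroma row is reused for two luma rows. A trivial sketch of that row mapping (illustration only):

// Illustration: the luma-row to chroma-row mapping used by the deleted
// ConvertYUVToRGB32_MMX/SSE loops when y_shift == 1.
#include <cstdio>

int main() {
  unsigned int y_shift = 1;  // vertically subsampled chroma
  for (int y = 0; y < 6; ++y)
    std::printf("luma row %d -> chroma row %d\n", y, y >> y_shift);
  return 0;
}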
diff --git a/src/media/base/simd/filter_yuv.h b/src/media/base/simd/filter_yuv.h
deleted file mode 100644
index 5a9cf11..0000000
--- a/src/media/base/simd/filter_yuv.h
+++ /dev/null
@@ -1,29 +0,0 @@
-// Copyright (c) 2011 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef MEDIA_BASE_SIMD_FILTER_YUV_H_
-#define MEDIA_BASE_SIMD_FILTER_YUV_H_
-
-#include "base/basictypes.h"
-
-namespace media {
-
-typedef void (*FilterYUVRowsProc)(uint8*,
- const uint8*,
- const uint8*,
- int,
- int);
-
-void FilterYUVRows_C(uint8* ybuf, const uint8* y0_ptr, const uint8* y1_ptr,
- int source_width, int source_y_fraction);
-
-void FilterYUVRows_MMX(uint8* ybuf, const uint8* y0_ptr, const uint8* y1_ptr,
- int source_width, int source_y_fraction);
-
-void FilterYUVRows_SSE2(uint8* ybuf, const uint8* y0_ptr, const uint8* y1_ptr,
- int source_width, int source_y_fraction);
-
-} // namespace media
-
-#endif // MEDIA_BASE_SIMD_FILTER_YUV_H_
diff --git a/src/media/base/simd/filter_yuv_c.cc b/src/media/base/simd/filter_yuv_c.cc
deleted file mode 100644
index f292d21..0000000
--- a/src/media/base/simd/filter_yuv_c.cc
+++ /dev/null
@@ -1,38 +0,0 @@
-// Copyright (c) 2011 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "media/base/simd/filter_yuv.h"
-
-namespace media {
-
-void FilterYUVRows_C(uint8* ybuf, const uint8* y0_ptr, const uint8* y1_ptr,
- int source_width, int source_y_fraction) {
- int y1_fraction = source_y_fraction;
- int y0_fraction = 256 - y1_fraction;
- uint8* end = ybuf + source_width;
- uint8* rounded_end = ybuf + (source_width & ~7);
-
- while (ybuf < rounded_end) {
- ybuf[0] = (y0_ptr[0] * y0_fraction + y1_ptr[0] * y1_fraction) >> 8;
- ybuf[1] = (y0_ptr[1] * y0_fraction + y1_ptr[1] * y1_fraction) >> 8;
- ybuf[2] = (y0_ptr[2] * y0_fraction + y1_ptr[2] * y1_fraction) >> 8;
- ybuf[3] = (y0_ptr[3] * y0_fraction + y1_ptr[3] * y1_fraction) >> 8;
- ybuf[4] = (y0_ptr[4] * y0_fraction + y1_ptr[4] * y1_fraction) >> 8;
- ybuf[5] = (y0_ptr[5] * y0_fraction + y1_ptr[5] * y1_fraction) >> 8;
- ybuf[6] = (y0_ptr[6] * y0_fraction + y1_ptr[6] * y1_fraction) >> 8;
- ybuf[7] = (y0_ptr[7] * y0_fraction + y1_ptr[7] * y1_fraction) >> 8;
- y0_ptr += 8;
- y1_ptr += 8;
- ybuf += 8;
- }
-
- while (ybuf < end) {
- ybuf[0] = (y0_ptr[0] * y0_fraction + y1_ptr[0] * y1_fraction) >> 8;
- ++ybuf;
- ++y0_ptr;
- ++y1_ptr;
- }
-}
-
-} // namespace media
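FilterYUVRows_C above blends two source rows with an 8-bit fraction; a one-byte worked example of the same arithmetic (illustration only, arbitrary values):

// Illustration: the per-byte blend performed by the deleted FilterYUVRows_C.
#include <cstdint>
#include <cstdio>

int main() {
  uint8_t row0 = 100, row1 = 200;
  int fraction = 64;  // 64/256: 25% of row1, 75% of row0
  int blended = (row0 * (256 - fraction) + row1 * fraction) >> 8;
  std::printf("blended=%d\n", blended);  // prints 125
  return 0;
}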
diff --git a/src/media/base/simd/filter_yuv_mmx.cc b/src/media/base/simd/filter_yuv_mmx.cc
deleted file mode 100644
index 09d62e3..0000000
--- a/src/media/base/simd/filter_yuv_mmx.cc
+++ /dev/null
@@ -1,79 +0,0 @@
-// Copyright (c) 2011 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#if defined(_MSC_VER)
-#include <intrin.h>
-#else
-#include <mmintrin.h>
-#include <emmintrin.h>
-#endif
-
-#include "build/build_config.h"
-#include "media/base/simd/filter_yuv.h"
-
-namespace media {
-
-#if defined(COMPILER_MSVC)
-// Warning 4799 is about calling emms before the function exits.
-// We call emms at the frame level, so suppress this warning.
-#pragma warning(disable: 4799)
-#endif
-
-void FilterYUVRows_MMX(uint8* dest,
- const uint8* src0,
- const uint8* src1,
- int width,
- int fraction) {
- int pixel = 0;
-
- // Process the unaligned bytes first.
- int unaligned_width =
- (8 - (reinterpret_cast<uintptr_t>(dest) & 7)) & 7;
- while (pixel < width && pixel < unaligned_width) {
- dest[pixel] = (src0[pixel] * (256 - fraction) +
- src1[pixel] * fraction) >> 8;
- ++pixel;
- }
-
- __m64 zero = _mm_setzero_si64();
- __m64 src1_fraction = _mm_set1_pi16(fraction);
- __m64 src0_fraction = _mm_set1_pi16(256 - fraction);
- const __m64* src0_64 = reinterpret_cast<const __m64*>(src0 + pixel);
- const __m64* src1_64 = reinterpret_cast<const __m64*>(src1 + pixel);
- __m64* dest64 = reinterpret_cast<__m64*>(dest + pixel);
- __m64* end64 = reinterpret_cast<__m64*>(
- reinterpret_cast<uintptr_t>(dest + width) & ~7);
-
- while (dest64 < end64) {
- __m64 src0 = *src0_64++;
- __m64 src1 = *src1_64++;
- __m64 src2 = _mm_unpackhi_pi8(src0, zero);
- __m64 src3 = _mm_unpackhi_pi8(src1, zero);
- src0 = _mm_unpacklo_pi8(src0, zero);
- src1 = _mm_unpacklo_pi8(src1, zero);
- src0 = _mm_mullo_pi16(src0, src0_fraction);
- src1 = _mm_mullo_pi16(src1, src1_fraction);
- src2 = _mm_mullo_pi16(src2, src0_fraction);
- src3 = _mm_mullo_pi16(src3, src1_fraction);
- src0 = _mm_add_pi16(src0, src1);
- src2 = _mm_add_pi16(src2, src3);
- src0 = _mm_srli_pi16(src0, 8);
- src2 = _mm_srli_pi16(src2, 8);
- src0 = _mm_packs_pu16(src0, src2);
- *dest64++ = src0;
- pixel += 8;
- }
-
- while (pixel < width) {
- dest[pixel] = (src0[pixel] * (256 - fraction) +
- src1[pixel] * fraction) >> 8;
- ++pixel;
- }
-}
-
-#if defined(COMPILER_MSVC)
-#pragma warning(default: 4799)
-#endif
-
-} // namespace media
diff --git a/src/media/base/simd/filter_yuv_sse2.cc b/src/media/base/simd/filter_yuv_sse2.cc
deleted file mode 100644
index 84dba5a..0000000
--- a/src/media/base/simd/filter_yuv_sse2.cc
+++ /dev/null
@@ -1,72 +0,0 @@
-// Copyright (c) 2011 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#if defined(_MSC_VER)
-#include <intrin.h>
-#else
-#include <mmintrin.h>
-#include <emmintrin.h>
-#endif
-
-#include "media/base/simd/filter_yuv.h"
-
-namespace media {
-
-void FilterYUVRows_SSE2(uint8* dest,
- const uint8* src0,
- const uint8* src1,
- int width,
- int fraction) {
- int pixel = 0;
-
- // Process the unaligned bytes first.
- int unaligned_width =
- (16 - (reinterpret_cast<uintptr_t>(dest) & 15)) & 15;
- while (pixel < width && pixel < unaligned_width) {
- dest[pixel] = (src0[pixel] * (256 - fraction) +
- src1[pixel] * fraction) >> 8;
- ++pixel;
- }
-
- __m128i zero = _mm_setzero_si128();
- __m128i src1_fraction = _mm_set1_epi16(fraction);
- __m128i src0_fraction = _mm_set1_epi16(256 - fraction);
- const __m128i* src0_128 =
- reinterpret_cast<const __m128i*>(src0 + pixel);
- const __m128i* src1_128 =
- reinterpret_cast<const __m128i*>(src1 + pixel);
- __m128i* dest128 = reinterpret_cast<__m128i*>(dest + pixel);
- __m128i* end128 = reinterpret_cast<__m128i*>(
- reinterpret_cast<uintptr_t>(dest + width) & ~15);
-
- while (dest128 < end128) {
- __m128i src0 = _mm_loadu_si128(src0_128);
- __m128i src1 = _mm_loadu_si128(src1_128);
- __m128i src2 = _mm_unpackhi_epi8(src0, zero);
- __m128i src3 = _mm_unpackhi_epi8(src1, zero);
- src0 = _mm_unpacklo_epi8(src0, zero);
- src1 = _mm_unpacklo_epi8(src1, zero);
- src0 = _mm_mullo_epi16(src0, src0_fraction);
- src1 = _mm_mullo_epi16(src1, src1_fraction);
- src2 = _mm_mullo_epi16(src2, src0_fraction);
- src3 = _mm_mullo_epi16(src3, src1_fraction);
- src0 = _mm_add_epi16(src0, src1);
- src2 = _mm_add_epi16(src2, src3);
- src0 = _mm_srli_epi16(src0, 8);
- src2 = _mm_srli_epi16(src2, 8);
- src0 = _mm_packus_epi16(src0, src2);
- *dest128++ = src0;
- ++src0_128;
- ++src1_128;
- pixel += 16;
- }
-
- while (pixel < width) {
- dest[pixel] = (src0[pixel] * (256 - fraction) +
- src1[pixel] * fraction) >> 8;
- ++pixel;
- }
-}
-
-} // namespace media
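Both SIMD filters above follow the same head/body/tail shape: scalar bytes until the destination is aligned, a wide loop, then scalar leftovers. A scalar-only sketch of that structure (illustration only; BlendRows is a hypothetical stand-in, and the real body uses the MMX/SSE2 intrinsics shown in the diffs):

// Illustration of the head/body/tail split used by FilterYUVRows_MMX/SSE2.
#include <cstdint>

void BlendRows(uint8_t* dest, const uint8_t* a, const uint8_t* b,
               int width, int fraction) {
  int i = 0;
  // Head: scalar bytes until dest reaches 16-byte alignment.
  int head = (16 - (reinterpret_cast<uintptr_t>(dest) & 15)) & 15;
  for (; i < width && i < head; ++i)
    dest[i] = (a[i] * (256 - fraction) + b[i] * fraction) >> 8;
  // Body: the SIMD versions process 8 (MMX) or 16 (SSE2) bytes per pass here.
  int body_end = i + ((width - i) & ~15);
  for (; i < body_end; ++i)
    dest[i] = (a[i] * (256 - fraction) + b[i] * fraction) >> 8;
  // Tail: whatever is left after the last full vector.
  for (; i < width; ++i)
    dest[i] = (a[i] * (256 - fraction) + b[i] * fraction) >> 8;
}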
diff --git a/src/media/base/simd/linear_scale_yuv_to_rgb_mmx.asm b/src/media/base/simd/linear_scale_yuv_to_rgb_mmx.asm
deleted file mode 100644
index 7f7e0e8..0000000
--- a/src/media/base/simd/linear_scale_yuv_to_rgb_mmx.asm
+++ /dev/null
@@ -1,23 +0,0 @@
-; Copyright (c) 2011 The Chromium Authors. All rights reserved.
-; Use of this source code is governed by a BSD-style license that can be
-; found in the LICENSE file.
-
-%include "x86inc.asm"
-
-;
-; This file uses MMX instructions.
-;
- SECTION_TEXT
- CPU MMX
-
-; Use movq to save the output.
-%define MOVQ movq
-
-; void LinearScaleYUVToRGB32Row_MMX(const uint8* y_buf,
-; const uint8* u_buf,
-; const uint8* v_buf,
-; uint8* rgb_buf,
-; int width,
-; int source_dx);
-%define SYMBOL LinearScaleYUVToRGB32Row_MMX
-%include "linear_scale_yuv_to_rgb_mmx.inc"
diff --git a/src/media/base/simd/linear_scale_yuv_to_rgb_mmx.inc b/src/media/base/simd/linear_scale_yuv_to_rgb_mmx.inc
deleted file mode 100644
index 91c06a5..0000000
--- a/src/media/base/simd/linear_scale_yuv_to_rgb_mmx.inc
+++ /dev/null
@@ -1,166 +0,0 @@
-; Copyright (c) 2011 The Chromium Authors. All rights reserved.
-; Use of this source code is governed by a BSD-style license that can be
-; found in the LICENSE file.
-
- global mangle(SYMBOL) PRIVATE
- align function_align
-
-mangle(SYMBOL):
- %assign stack_offset 0
-
- extern mangle(kCoefficientsRgbY)
-
-; Parameters are in the following order:
-; 1. Y plane
-; 2. U plane
-; 3. V plane
-; 4. ARGB frame
-; 5. Width
-; 6. Source dx
-
-PROLOGUE 6, 7, 3, Y, R0, R1, ARGB, R2, R3, TEMP
-
-%if gprsize == 8
-%define WORD_SIZE QWORD
-%else
-%define WORD_SIZE DWORD
-%endif
-
-; Define register aliases.
-%define Xq R1q ; Current X position
-%define COMPLq R2q ; Component A value
-%define COMPLd R2d ; Component A value
-%define U_ARG_REGq R0q ; U plane address argument
-%define V_ARG_REGq R1q ; V plane address argument
-%define SOURCE_DX_ARG_REGq R3q ; Source dx argument
-%define WIDTH_ARG_REGq R2q ; Width argument
-
-%ifdef PIC
-; PIC code shares COMPR, U and V in the same register, so be careful in the
-; code that they don't get mixed up. This allows R3q to be used for the YUV table.
-%define COMPRq R0q ; Component B value
-%define COMPRd R0d ; Component B value
-%define Uq R0q ; U plane address
-%define Vq R0q ; V plane address
-%define U_PLANE WORD_SIZE [rsp + 3 * gprsize]
-%define TABLE R3q ; Address of the table
-%else
-; Non-PIC code defines.
-%define COMPRq R3q ; Component B value
-%define COMPRd R3d ; Component B value
-%define Uq R0q ; U plane address
-%define Vq R3q ; V plane address
-%define TABLE mangle(kCoefficientsRgbY)
-%endif
-
-; Defines for stack variables. These are used in both PIC and non-PIC code.
-%define V_PLANE WORD_SIZE [rsp + 2 * gprsize]
-%define SOURCE_DX WORD_SIZE [rsp + gprsize]
-%define SOURCE_WIDTH WORD_SIZE [rsp]
-
-; Handle stack variables differently for PIC and non-PIC code.
-
-%ifdef PIC
-; Define stack usage for PIC code. PIC code pushes the U plane onto the stack.
- PUSH U_ARG_REGq
- PUSH V_ARG_REGq
- PUSH SOURCE_DX_ARG_REGq
- imul WIDTH_ARG_REGq, SOURCE_DX_ARG_REGq ; source_width = width * source_dx
- PUSH WIDTH_ARG_REGq
-
-; Load the address of kCoefficientsRgbY into TABLE
- mov TEMPq, SOURCE_DX_ARG_REGq ; Need to save source_dx first
- LOAD_SYM TABLE, mangle(kCoefficientsRgbY)
-%define SOURCE_DX_ARG_REGq TEMPq ; Overwrite SOURCE_DX_ARG_REGq to TEMPq
-%else
-; Define stack usage. Non-PIC code just pushes 3 registers onto the stack.
- PUSH V_ARG_REGq
- PUSH SOURCE_DX_ARG_REGq
- imul WIDTH_ARG_REGq, SOURCE_DX_ARG_REGq ; source_width = width * source_dx
- PUSH WIDTH_ARG_REGq
-%endif
-
-%macro EPILOGUE 0
-%ifdef PIC
- ADD rsp, 4 * gprsize
-%else
- ADD rsp, 3 * gprsize
-%endif
-%endmacro
-
- xor Xq, Xq ; x = 0
- cmp SOURCE_DX_ARG_REGq, 0x20000
- jl .lscaleend
- mov Xq, 0x8000 ; x = 0.5 for 1/2 or less
- jmp .lscaleend
-
-.lscaleloop:
-%ifdef PIC
- mov Uq, U_PLANE ; PIC code saves U_PLANE on stack.
-%endif
-
-; Define macros for scaling YUV components since they are reused.
-%macro SCALEUV 1
- mov TEMPq, Xq
- sar TEMPq, 0x11
- movzx COMPLd, BYTE [%1 + TEMPq]
- movzx COMPRd, BYTE [%1 + TEMPq + 1]
- mov TEMPq, Xq
- and TEMPq, 0x1fffe
- imul COMPRq, TEMPq
- xor TEMPq, 0x1fffe
- imul COMPLq, TEMPq
- add COMPLq, COMPRq
- shr COMPLq, 17
-%endmacro
- SCALEUV Uq ; Use the above macro to scale U
- movq mm0, [TABLE + 2048 + 8 * COMPLq]
-
- mov Vq, V_PLANE ; Read V address from stack
- SCALEUV Vq ; Use the above macro to scale V
- paddsw mm0, [TABLE + 4096 + 8 * COMPLq]
-
-%macro SCALEY 0
- mov TEMPq, Xq
- sar TEMPq, 0x10
- movzx COMPLd, BYTE [Yq + TEMPq]
- movzx COMPRd, BYTE [Yq + TEMPq + 1]
- mov TEMPq, Xq
- add Xq, SOURCE_DX ; Add source_dx from stack
- and TEMPq, 0xffff
- imul COMPRq, TEMPq
- xor TEMPq, 0xffff
- imul COMPLq, TEMPq
- add COMPLq, COMPRq
- shr COMPLq, 16
-%endmacro
- SCALEY ; Use the above macro to scale Y1
- movq mm1, [TABLE + 8 * COMPLq]
-
- cmp Xq, SOURCE_WIDTH ; Compare source_width from stack
- jge .lscalelastpixel
-
- SCALEY ; Use the above macro to scale Y2
- movq mm2, [TABLE + 8 * COMPLq]
-
- paddsw mm1, mm0
- paddsw mm2, mm0
- psraw mm1, 0x6
- psraw mm2, 0x6
- packuswb mm1, mm2
- MOVQ [ARGBq], mm1
- add ARGBq, 0x8
-
-.lscaleend:
- cmp Xq, SOURCE_WIDTH ; Compare source_width from stack
- jl .lscaleloop
- EPILOGUE
- RET
-
-.lscalelastpixel:
- paddsw mm1, mm0
- psraw mm1, 6
- packuswb mm1, mm1
- movd [ARGBq], mm1
- EPILOGUE
- RET
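The SCALEY macro above interpolates with imul against the fraction and its xor-complement; SCALEUV does the same at 17-bit precision for the subsampled chroma. A small C++ rendering of that blend (illustration only, arbitrary values):

// Illustration: SCALEY-style interpolation.
// frac ^ 0xffff == 65535 - frac, a one-LSB-cheaper stand-in for (65536 - frac).
#include <cstdio>

int main() {
  int left = 50, right = 150;  // two neighbouring luma samples
  int frac = 0x4000;           // 0.25 in 16-bit fixed point
  int blended = (frac * right + (frac ^ 0xffff) * left) >> 16;
  std::printf("blended=%d\n", blended);  // prints 74 (~0.75*50 + 0.25*150)
  return 0;
}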
diff --git a/src/media/base/simd/linear_scale_yuv_to_rgb_mmx_x64.asm b/src/media/base/simd/linear_scale_yuv_to_rgb_mmx_x64.asm
deleted file mode 100644
index db78544..0000000
--- a/src/media/base/simd/linear_scale_yuv_to_rgb_mmx_x64.asm
+++ /dev/null
@@ -1,142 +0,0 @@
-; Copyright (c) 2011 The Chromium Authors. All rights reserved.
-; Use of this source code is governed by a BSD-style license that can be
-; found in the LICENSE file.
-
-%include "x86inc.asm"
-
-;
-; This file uses MMX instructions.
-;
- SECTION_TEXT
- CPU MMX
-
-%define SYMBOL LinearScaleYUVToRGB32Row_MMX_X64
- global mangle(SYMBOL) PRIVATE
- align function_align
-
-mangle(SYMBOL):
- %assign stack_offset 0
- extern mangle(kCoefficientsRgbY)
-
-; Parameters are in the following order:
-; 1. Y plane
-; 2. U plane
-; 3. V plane
-; 4. ARGB frame
-; 5. Width
-; 6. Source dx
-
-PROLOGUE 6, 7, 3, Y, U, V, ARGB, WIDTH, SOURCE_DX, COMPL
-
-%define TABLEq r10
-%define Xq r11
-%define INDEXq r12
-%define COMPRd r13d
-%define COMPRq r13
-%define FRACTIONq r14
-
- PUSH TABLEq
- PUSH Xq
- PUSH INDEXq
- PUSH COMPRq
- PUSH FRACTIONq
-
-%macro EPILOGUE 0
- POP FRACTIONq
- POP COMPRq
- POP INDEXq
- POP Xq
- POP TABLEq
-%endmacro
-
- LOAD_SYM TABLEq, mangle(kCoefficientsRgbY)
-
- imul WIDTHq, SOURCE_DXq ; source_width = width * source_dx
- xor Xq, Xq ; x = 0
- cmp SOURCE_DXq, 0x20000
- jl .lscaleend
- mov Xq, 0x8000 ; x = 0.5 for 1/2 or less
- jmp .lscaleend
-
-.lscaleloop:
- ; Interpolate U
- mov INDEXq, Xq
- sar INDEXq, 0x11
- movzx COMPLd, BYTE [Uq + INDEXq]
- movzx COMPRd, BYTE [Uq + INDEXq + 1]
- mov FRACTIONq, Xq
- and FRACTIONq, 0x1fffe
- imul COMPRq, FRACTIONq
- xor FRACTIONq, 0x1fffe
- imul COMPLq, FRACTIONq
- add COMPLq, COMPRq
- shr COMPLq, 17
- movq mm0, [TABLEq + 2048 + 8 * COMPLq]
-
- ; Interpolate V
- movzx COMPLd, BYTE [Vq + INDEXq]
- movzx COMPRd, BYTE [Vq + INDEXq + 1]
- ; Trick: imul COMPL first, then COMPR.
- ; Saves two instructions. :)
- imul COMPLq, FRACTIONq
- xor FRACTIONq, 0x1fffe
- imul COMPRq, FRACTIONq
- add COMPLq, COMPRq
- shr COMPLq, 17
- paddsw mm0, [TABLEq + 4096 + 8 * COMPLq]
-
- ; Interpolate first Y1.
- lea INDEXq, [Xq + SOURCE_DXq] ; INDEXq now points to next pixel.
- ; Xq points to current pixel.
- mov FRACTIONq, Xq
- sar Xq, 0x10
- movzx COMPLd, BYTE [Yq + Xq]
- movzx COMPRd, BYTE [Yq + Xq + 1]
- and FRACTIONq, 0xffff
- imul COMPRq, FRACTIONq
- xor FRACTIONq, 0xffff
- imul COMPLq, FRACTIONq
- add COMPLq, COMPRq
- shr COMPLq, 16
- movq mm1, [TABLEq + 8 * COMPLq]
-
- ; Interpolate Y2 if available.
- cmp INDEXq, WIDTHq
- jge .lscalelastpixel
-
- lea Xq, [INDEXq + SOURCE_DXq] ; Xq points to next pixel.
- ; INDEXq points to current pixel.
- mov FRACTIONq, INDEXq
- sar INDEXq, 0x10
- movzx COMPLd, BYTE [Yq + INDEXq]
- movzx COMPRd, BYTE [Yq + INDEXq + 1]
- and FRACTIONq, 0xffff
- imul COMPRq, FRACTIONq
- xor FRACTIONq, 0xffff
- imul COMPLq, FRACTIONq
- add COMPLq, COMPRq
- shr COMPLq, 16
- movq mm2, [TABLEq + 8 * COMPLq]
-
- paddsw mm1, mm0
- paddsw mm2, mm0
- psraw mm1, 0x6
- psraw mm2, 0x6
- packuswb mm1, mm2
- movntq [ARGBq], mm1
- add ARGBq, 0x8
-
-.lscaleend:
- cmp Xq, WIDTHq
- jl .lscaleloop
- jmp .epilogue
-
-.lscalelastpixel:
- paddsw mm1, mm0
- psraw mm1, 6
- packuswb mm1, mm1
- movd [ARGBq], mm1
-
-.epilogue:
- EPILOGUE
- RET
diff --git a/src/media/base/simd/linear_scale_yuv_to_rgb_sse.asm b/src/media/base/simd/linear_scale_yuv_to_rgb_sse.asm
deleted file mode 100644
index 847911c..0000000
--- a/src/media/base/simd/linear_scale_yuv_to_rgb_sse.asm
+++ /dev/null
@@ -1,23 +0,0 @@
-; Copyright (c) 2011 The Chromium Authors. All rights reserved.
-; Use of this source code is governed by a BSD-style license that can be
-; found in the LICENSE file.
-
-%include "x86inc.asm"
-
-;
-; This file uses MMX and SSE instructions.
-;
- SECTION_TEXT
- CPU MMX, SSE
-
-; Use movntq to save the output.
-%define MOVQ movntq
-
-; void LinearScaleYUVToRGB32Row_SSE(const uint8* y_buf,
-; const uint8* u_buf,
-; const uint8* v_buf,
-; uint8* rgb_buf,
-; int width,
-; int source_dx);
-%define SYMBOL LinearScaleYUVToRGB32Row_SSE
-%include "linear_scale_yuv_to_rgb_mmx.inc"
diff --git a/src/media/base/simd/scale_yuv_to_rgb_mmx.asm b/src/media/base/simd/scale_yuv_to_rgb_mmx.asm
deleted file mode 100644
index 6a83757..0000000
--- a/src/media/base/simd/scale_yuv_to_rgb_mmx.asm
+++ /dev/null
@@ -1,23 +0,0 @@
-; Copyright (c) 2011 The Chromium Authors. All rights reserved.
-; Use of this source code is governed by a BSD-style license that can be
-; found in the LICENSE file.
-
-%include "x86inc.asm"
-
-;
-; This file uses MMX instructions.
-;
- SECTION_TEXT
- CPU MMX
-
-; Use movq to save the output.
-%define MOVQ movq
-
-; void ScaleYUVToRGB32Row_MMX(const uint8* y_buf,
-; const uint8* u_buf,
-; const uint8* v_buf,
-; uint8* rgb_buf,
-; int width,
-; int source_dx);
-%define SYMBOL ScaleYUVToRGB32Row_MMX
-%include "scale_yuv_to_rgb_mmx.inc"
diff --git a/src/media/base/simd/scale_yuv_to_rgb_mmx.inc b/src/media/base/simd/scale_yuv_to_rgb_mmx.inc
deleted file mode 100644
index 94c101c..0000000
--- a/src/media/base/simd/scale_yuv_to_rgb_mmx.inc
+++ /dev/null
@@ -1,115 +0,0 @@
-; Copyright (c) 2011 The Chromium Authors. All rights reserved.
-; Use of this source code is governed by a BSD-style license that can be
-; found in the LICENSE file.
-
- global mangle(SYMBOL) PRIVATE
- align function_align
-
-mangle(SYMBOL):
- %assign stack_offset 0
-
- extern mangle(kCoefficientsRgbY)
-
-; Parameters are in the following order:
-; 1. Y plane
-; 2. U plane
-; 3. V plane
-; 4. ARGB frame
-; 5. Width
-; 6. Source dx
-
-PROLOGUE 6, 7, 3, Y, U, V, ARGB, R1, R2, TEMP
-
-%ifdef ARCH_X86_64
-%define WORD_SIZE QWORD
-%else
-%define WORD_SIZE DWORD
-%endif
-
-%ifdef PIC
- PUSH R1q ; Width
-%endif
- PUSH R2q ; Source dx
-
-%define SOURCE_DX WORD_SIZE [rsp]
-
-; PIC code.
-%ifdef PIC
- LOAD_SYM R1q, mangle(kCoefficientsRgbY)
-%define WIDTH WORD_SIZE [rsp + gprsize]
-%define TABLE R1q
-%define Xq R2q
-
-; Non-PIC code.
-%else
-%define WIDTH R1q
-%define TABLE mangle(kCoefficientsRgbY)
-%define Xq R2q
-%endif
-
- ; Set Xq index to 0.
- xor Xq, Xq
- jmp .scaleend
-
-.scaleloop:
- ; TABLE can either be a register or a symbol depending on whether this is
- ; PIC code or not.
- mov TEMPq, Xq
- sar TEMPq, 17
- movzx TEMPd, BYTE [Uq + TEMPq]
- movq mm0, [TABLE + 2048 + 8 * TEMPq]
- mov TEMPq, Xq
- sar TEMPq, 17
- movzx TEMPd, BYTE [Vq + TEMPq]
- paddsw mm0, [TABLE + 4096 + 8 * TEMPq]
- mov TEMPq, Xq
- add Xq, SOURCE_DX
- sar TEMPq, 16
- movzx TEMPd, BYTE [Yq + TEMPq]
- movq mm1, [TABLE + 8 * TEMPq]
- mov TEMPq, Xq
- add Xq, SOURCE_DX
- sar TEMPq, 16
- movzx TEMPd, BYTE [Yq + TEMPq]
- movq mm2, [TABLE + 8 * TEMPq]
- paddsw mm1, mm0
- paddsw mm2, mm0
- psraw mm1, 6
- psraw mm2, 6
- packuswb mm1, mm2
- MOVQ QWORD [ARGBq], mm1
- add ARGBq, 8
-
-.scaleend:
- ; WIDTH can either be a register or memory depending on whether this is
- ; PIC code or not.
- sub WIDTH, 2
- jns .scaleloop
-
- and WIDTH, 1 ; odd number of pixels?
- jz .scaledone
-
- mov TEMPq, Xq
- sar TEMPq, 17
- movzx TEMPd, BYTE [Uq + TEMPq]
- movq mm0, [TABLE + 2048 + 8 * TEMPq]
- mov TEMPq, Xq
- sar TEMPq, 17
- movzx TEMPd, BYTE [Vq + TEMPq]
- paddsw mm0, [TABLE + 4096 + 8 * TEMPq]
- mov TEMPq, Xq
- sar TEMPq, 16
- movzx TEMPd, BYTE [Yq + TEMPq]
- movq mm1, [TABLE + 8 * TEMPq]
- paddsw mm1, mm0
- psraw mm1, 6
- packuswb mm1, mm1
- movd DWORD [ARGBq], mm1
-
-.scaledone:
-%ifdef PIC
- ADD rsp, 2 * gprsize
-%else
- ADD rsp, gprsize
-%endif
- RET
diff --git a/src/media/base/simd/scale_yuv_to_rgb_sse.asm b/src/media/base/simd/scale_yuv_to_rgb_sse.asm
deleted file mode 100644
index 5b849a6..0000000
--- a/src/media/base/simd/scale_yuv_to_rgb_sse.asm
+++ /dev/null
@@ -1,23 +0,0 @@
-; Copyright (c) 2011 The Chromium Authors. All rights reserved.
-; Use of this source code is governed by a BSD-style license that can be
-; found in the LICENSE file.
-
-%include "x86inc.asm"
-
-;
-; This file uses MMX and SSE instructions.
-;
- SECTION_TEXT
- CPU MMX, SSE
-
-; Use movntq to save the output.
-%define MOVQ movntq
-
-; void ScaleYUVToRGB32Row_SSE(const uint8* y_buf,
-; const uint8* u_buf,
-; const uint8* v_buf,
-; uint8* rgb_buf,
-; int width,
-; int source_dx);
-%define SYMBOL ScaleYUVToRGB32Row_SSE
-%include "scale_yuv_to_rgb_mmx.inc"
diff --git a/src/media/base/simd/scale_yuv_to_rgb_sse2_x64.asm b/src/media/base/simd/scale_yuv_to_rgb_sse2_x64.asm
deleted file mode 100644
index 5e58146..0000000
--- a/src/media/base/simd/scale_yuv_to_rgb_sse2_x64.asm
+++ /dev/null
@@ -1,110 +0,0 @@
-; Copyright (c) 2011 The Chromium Authors. All rights reserved.
-; Use of this source code is governed by a BSD-style license that can be
-; found in the LICENSE file.
-
-%include "x86inc.asm"
-
-;
-; This file uses MMX and SSE2 instructions.
-;
- SECTION_TEXT
- CPU SSE2
-
-; void ScaleYUVToRGB32Row_SSE2_X64(const uint8* y_buf,
-; const uint8* u_buf,
-; const uint8* v_buf,
-; uint8* rgb_buf,
-; int width,
-; int source_dx);
-%define SYMBOL ScaleYUVToRGB32Row_SSE2_X64
-
- global mangle(SYMBOL) PRIVATE
- align function_align
-
-mangle(SYMBOL):
- %assign stack_offset 0
- extern mangle(kCoefficientsRgbY)
-
-; Parameters are in the following order:
-; 1. Y plane
-; 2. U plane
-; 3. V plane
-; 4. ARGB frame
-; 5. Width
-; 6. Source dx
-
-PROLOGUE 6, 7, 3, Y, U, V, ARGB, WIDTH, SOURCE_DX, COMP
-
-%define TABLEq r10
-%define Xq r11
-%define INDEXq r12
- PUSH r10
- PUSH r11
- PUSH r12
-
- LOAD_SYM TABLEq, mangle(kCoefficientsRgbY)
-
- ; Set Xq index to 0.
- xor Xq, Xq
- jmp .scaleend
-
-.scaleloop:
- ; Read UV pixels.
- mov INDEXq, Xq
- sar INDEXq, 17
- movzx COMPd, BYTE [Uq + INDEXq]
- movq xmm0, [TABLEq + 2048 + 8 * COMPq]
- movzx COMPd, BYTE [Vq + INDEXq]
- movq xmm1, [TABLEq + 4096 + 8 * COMPq]
-
- ; Read first Y pixel.
- lea INDEXq, [Xq + SOURCE_DXq] ; INDEXq now points to next pixel.
- sar Xq, 16
- movzx COMPd, BYTE [Yq + Xq]
- paddsw xmm0, xmm1 ; Hide a ADD after memory load.
- movq xmm1, [TABLEq + 8 * COMPq]
-
- ; Read next Y pixel.
- lea Xq, [INDEXq + SOURCE_DXq] ; Xq now points to next pixel.
- sar INDEXq, 16
- movzx COMPd, BYTE [Yq + INDEXq]
- movq xmm2, [TABLEq + 8 * COMPq]
- paddsw xmm1, xmm0
- paddsw xmm2, xmm0
- shufps xmm1, xmm2, 0x44 ; Join two pixels into one XMM register
- psraw xmm1, 6
- packuswb xmm1, xmm1
- movq QWORD [ARGBq], xmm1
- add ARGBq, 8
-
-.scaleend:
- sub WIDTHq, 2
- jns .scaleloop
-
- and WIDTHq, 1 ; odd number of pixels?
- jz .scaledone
-
- ; Read U V components.
- mov INDEXq, Xq
- sar INDEXq, 17
- movzx COMPd, BYTE [Uq + INDEXq]
- movq xmm0, [TABLEq + 2048 + 8 * COMPq]
- movzx COMPd, BYTE [Vq + INDEXq]
- movq xmm1, [TABLEq + 4096 + 8 * COMPq]
- paddsw xmm0, xmm1
-
- ; Read one Y component.
- mov INDEXq, Xq
- sar INDEXq, 16
- movzx COMPd, BYTE [Yq + INDEXq]
- movq xmm1, [TABLEq + 8 * COMPq]
- paddsw xmm1, xmm0
- psraw xmm1, 6
- packuswb xmm1, xmm1
- movd DWORD [ARGBq], xmm1
-
-.scaledone:
- POP r12
- POP r11
- POP r10
- RET
diff --git a/src/media/base/simd/x86inc.asm b/src/media/base/simd/x86inc.asm
deleted file mode 100644
index 223ea3d..0000000
--- a/src/media/base/simd/x86inc.asm
+++ /dev/null
@@ -1,1012 +0,0 @@
-;*****************************************************************************
-;* x86inc.asm
-;*****************************************************************************
-;* Copyright (C) 2005-2011 x264 project
-;*
-;* Authors: Loren Merritt <lorenm@u.washington.edu>
-;* Anton Mitrofanov <BugMaster@narod.ru>
-;* Jason Garrett-Glaser <darkshikari@gmail.com>
-;*
-;* Permission to use, copy, modify, and/or distribute this software for any
-;* purpose with or without fee is hereby granted, provided that the above
-;* copyright notice and this permission notice appear in all copies.
-;*
-;* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
-;* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
-;* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
-;* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
-;* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
-;* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
-;* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-;*****************************************************************************
-
-; This is a header file for the x264ASM assembly language, which uses
-; NASM/YASM syntax combined with a large number of macros to provide easy
-; abstraction between different calling conventions (x86_32, win64, linux64).
-; It also has various other useful features to simplify writing the kind of
-; DSP functions that are most often used in x264.
-
-; Unlike the rest of x264, this file is available under an ISC license, as it
-; has significant usefulness outside of x264 and we want it to be available
-; to the largest audience possible. Of course, if you modify it for your own
-; purposes to add a new feature, we strongly encourage contributing a patch
-; as this feature might be useful for others as well. Send patches or ideas
-; to x264-devel@videolan.org .
-
-%ifndef MEDIA_BASE_SIMD_X86INC_ASM_
-%define MEDIA_BASE_SIMD_X86INC_ASM_
-
-%define program_name ff
-
-%ifdef ARCH_X86_64
- %ifidn __OUTPUT_FORMAT__,win32
- %define WIN64
- %else
- %define UNIX64
- %endif
-%endif
-
-%ifdef PREFIX
- %define mangle(x) _ %+ x
-%else
- %define mangle(x) x
-%endif
-
-; FIXME: All of the 64bit asm functions that take a stride as an argument
-; via register, assume that the high dword of that register is filled with 0.
-; This is true in practice (since we never do any 64bit arithmetic on strides,
-; and x264's strides are all positive), but is not guaranteed by the ABI.
-
-; Name of the .rodata section.
-; Kludge: Something on OS X fails to align .rodata even given an align attribute,
-; so use a different read-only section.
-%ifdef CHROMIUM
-%macro SECTION_RODATA 0-1 16
- %ifidn __OUTPUT_FORMAT__,macho64
- SECTION .text align=%1
- %elifidn __OUTPUT_FORMAT__,macho
- SECTION .text align=%1
- fakegot:
- %elifidn __OUTPUT_FORMAT__,aout
- section .text
- %else
- SECTION .rodata align=%1
- %endif
-%endmacro
-%else
-%macro SECTION_RODATA 0-1 16
- %ifidn __OUTPUT_FORMAT__,aout
- section .text
- %else
- SECTION .rodata align=%1
- %endif
-%endmacro
-%endif
-
-; aout does not support align=
-%macro SECTION_TEXT 0-1 16
- %ifidn __OUTPUT_FORMAT__,aout
- SECTION .text
- %else
- SECTION .text align=%1
- %endif
-%endmacro
-
-%ifdef WIN64
- %define PIC
-%elifndef ARCH_X86_64
-; For Chromium we may build PIC code even for 32-bit systems.
-%ifndef CHROMIUM
-; x86_32 doesn't require PIC.
-; Some distros prefer shared objects to be PIC, but nothing breaks if
-; the code contains a few textrels, so we'll skip that complexity.
- %undef PIC
-%endif
-%endif
-%ifdef PIC
- default rel
-%endif
-
-; Macros to eliminate most code duplication between x86_32 and x86_64:
-; Currently this works only for leaf functions which load all their arguments
-; into registers at the start, and make no other use of the stack. Luckily that
-; covers most of x264's asm.
-
-; PROLOGUE:
-; %1 = number of arguments. loads them from stack if needed.
-; %2 = number of registers used. pushes callee-saved regs if needed.
-; %3 = number of xmm registers used. pushes callee-saved xmm regs if needed.
-; %4 = list of names to define to registers
-; PROLOGUE can also be invoked by adding the same options to cglobal
-
-; e.g.
-; cglobal foo, 2,3,0, dst, src, tmp
-; declares a function (foo), taking two args (dst and src) and one local variable (tmp)
-
-; TODO Some functions can use some args directly from the stack. If they're the
-; last args then you can just not declare them, but if they're in the middle
-; we need a more flexible macro.
-
-; RET:
-; Pops anything that was pushed by PROLOGUE
-
-; REP_RET:
-; Same, but if it doesn't pop anything it becomes a 2-byte ret, for athlons
-; which are slow when a normal ret follows a branch.
-
-; registers:
-; rN and rNq are the native-size register holding function argument N
-; rNd, rNw, rNb are dword, word, and byte size
-; rNm is the original location of arg N (a register or on the stack), dword
-; rNmp is native size
-
-%macro DECLARE_REG 6
- %define r%1q %2
- %define r%1d %3
- %define r%1w %4
- %define r%1b %5
- %define r%1m %6
- %ifid %6 ; i.e. it's a register
- %define r%1mp %2
- %elifdef ARCH_X86_64 ; memory
- %define r%1mp qword %6
- %else
- %define r%1mp dword %6
- %endif
- %define r%1 %2
-%endmacro
-
-%macro DECLARE_REG_SIZE 2
- %define r%1q r%1
- %define e%1q r%1
- %define r%1d e%1
- %define e%1d e%1
- %define r%1w %1
- %define e%1w %1
- %define r%1b %2
- %define e%1b %2
-%ifndef ARCH_X86_64
- %define r%1 e%1
-%endif
-%endmacro
-
-DECLARE_REG_SIZE ax, al
-DECLARE_REG_SIZE bx, bl
-DECLARE_REG_SIZE cx, cl
-DECLARE_REG_SIZE dx, dl
-DECLARE_REG_SIZE si, sil
-DECLARE_REG_SIZE di, dil
-DECLARE_REG_SIZE bp, bpl
-
-; t# defines for when per-arch register allocation is more complex than just function arguments
-
-%macro DECLARE_REG_TMP 1-*
- %assign %%i 0
- %rep %0
- CAT_XDEFINE t, %%i, r%1
- %assign %%i %%i+1
- %rotate 1
- %endrep
-%endmacro
-
-%macro DECLARE_REG_TMP_SIZE 0-*
- %rep %0
- %define t%1q t%1 %+ q
- %define t%1d t%1 %+ d
- %define t%1w t%1 %+ w
- %define t%1b t%1 %+ b
- %rotate 1
- %endrep
-%endmacro
-
-DECLARE_REG_TMP_SIZE 0,1,2,3,4,5,6,7,8,9
-
-%ifdef ARCH_X86_64
- %define gprsize 8
-%else
- %define gprsize 4
-%endif
-
-%macro PUSH 1
- push %1
- %assign stack_offset stack_offset+gprsize
-%endmacro
-
-%macro POP 1
- pop %1
- %assign stack_offset stack_offset-gprsize
-%endmacro
-
-%macro SUB 2
- sub %1, %2
- %ifidn %1, rsp
- %assign stack_offset stack_offset+(%2)
- %endif
-%endmacro
-
-%macro ADD 2
- add %1, %2
- %ifidn %1, rsp
- %assign stack_offset stack_offset-(%2)
- %endif
-%endmacro
-
-%macro movifnidn 2
- %ifnidn %1, %2
- mov %1, %2
- %endif
-%endmacro
-
-%macro movsxdifnidn 2
- %ifnidn %1, %2
- movsxd %1, %2
- %endif
-%endmacro
-
-%macro ASSERT 1
- %if (%1) == 0
- %error assert failed
- %endif
-%endmacro
-
-%macro DEFINE_ARGS 0-*
- %ifdef n_arg_names
- %assign %%i 0
- %rep n_arg_names
- CAT_UNDEF arg_name %+ %%i, q
- CAT_UNDEF arg_name %+ %%i, d
- CAT_UNDEF arg_name %+ %%i, w
- CAT_UNDEF arg_name %+ %%i, b
- CAT_UNDEF arg_name %+ %%i, m
- CAT_UNDEF arg_name, %%i
- %assign %%i %%i+1
- %endrep
- %endif
-
- %assign %%i 0
- %rep %0
- %xdefine %1q r %+ %%i %+ q
- %xdefine %1d r %+ %%i %+ d
- %xdefine %1w r %+ %%i %+ w
- %xdefine %1b r %+ %%i %+ b
- %xdefine %1m r %+ %%i %+ m
- CAT_XDEFINE arg_name, %%i, %1
- %assign %%i %%i+1
- %rotate 1
- %endrep
- %assign n_arg_names %%i
-%endmacro
-
-%ifdef WIN64 ; Windows x64 ;=================================================
-
-DECLARE_REG 0, rcx, ecx, cx, cl, ecx
-DECLARE_REG 1, rdx, edx, dx, dl, edx
-DECLARE_REG 2, r8, r8d, r8w, r8b, r8d
-DECLARE_REG 3, r9, r9d, r9w, r9b, r9d
-DECLARE_REG 4, rdi, edi, di, dil, [rsp + stack_offset + 40]
-DECLARE_REG 5, rsi, esi, si, sil, [rsp + stack_offset + 48]
-DECLARE_REG 6, rax, eax, ax, al, [rsp + stack_offset + 56]
-%define r7m [rsp + stack_offset + 64]
-%define r8m [rsp + stack_offset + 72]
-
-%macro LOAD_IF_USED 2 ; reg_id, number_of_args
- %if %1 < %2
- mov r%1, [rsp + stack_offset + 8 + %1*8]
- %endif
-%endmacro
-
-%macro PROLOGUE 2-4+ 0 ; #args, #regs, #xmm_regs, arg_names...
- ASSERT %2 >= %1
- %assign regs_used %2
- ASSERT regs_used <= 7
- %if regs_used > 4
- push r4
- push r5
- %assign stack_offset stack_offset+16
- %endif
- WIN64_SPILL_XMM %3
- LOAD_IF_USED 4, %1
- LOAD_IF_USED 5, %1
- LOAD_IF_USED 6, %1
- DEFINE_ARGS %4
-%endmacro
-
-%macro WIN64_SPILL_XMM 1
- %assign xmm_regs_used %1
- ASSERT xmm_regs_used <= 16
- %if xmm_regs_used > 6
- sub rsp, (xmm_regs_used-6)*16+16
- %assign stack_offset stack_offset+(xmm_regs_used-6)*16+16
- %assign %%i xmm_regs_used
- %rep (xmm_regs_used-6)
- %assign %%i %%i-1
- movdqa [rsp + (%%i-6)*16+8], xmm %+ %%i
- %endrep
- %endif
-%endmacro
-
-%macro WIN64_RESTORE_XMM_INTERNAL 1
- %if xmm_regs_used > 6
- %assign %%i xmm_regs_used
- %rep (xmm_regs_used-6)
- %assign %%i %%i-1
- movdqa xmm %+ %%i, [%1 + (%%i-6)*16+8]
- %endrep
- add %1, (xmm_regs_used-6)*16+16
- %endif
-%endmacro
-
-%macro WIN64_RESTORE_XMM 1
- WIN64_RESTORE_XMM_INTERNAL %1
- %assign stack_offset stack_offset-(xmm_regs_used-6)*16+16
- %assign xmm_regs_used 0
-%endmacro
-
-%macro RET 0
- WIN64_RESTORE_XMM_INTERNAL rsp
- %if regs_used > 4
- pop r5
- pop r4
- %endif
- ret
-%endmacro
-
-%macro REP_RET 0
- %if regs_used > 4 || xmm_regs_used > 6
- RET
- %else
- rep ret
- %endif
-%endmacro
-
-%elifdef ARCH_X86_64 ; *nix x64 ;=============================================
-
-DECLARE_REG 0, rdi, edi, di, dil, edi
-DECLARE_REG 1, rsi, esi, si, sil, esi
-DECLARE_REG 2, rdx, edx, dx, dl, edx
-DECLARE_REG 3, rcx, ecx, cx, cl, ecx
-DECLARE_REG 4, r8, r8d, r8w, r8b, r8d
-DECLARE_REG 5, r9, r9d, r9w, r9b, r9d
-DECLARE_REG 6, rax, eax, ax, al, [rsp + stack_offset + 8]
-%define r7m [rsp + stack_offset + 16]
-%define r8m [rsp + stack_offset + 24]
-
-%macro LOAD_IF_USED 2 ; reg_id, number_of_args
- %if %1 < %2
- mov r%1, [rsp - 40 + %1*8]
- %endif
-%endmacro
-
-%macro PROLOGUE 2-4+ ; #args, #regs, #xmm_regs, arg_names...
- ASSERT %2 >= %1
- ASSERT %2 <= 7
- LOAD_IF_USED 6, %1
- DEFINE_ARGS %4
-%endmacro
-
-%macro RET 0
- ret
-%endmacro
-
-%macro REP_RET 0
- rep ret
-%endmacro
-
-%else ; X86_32 ;==============================================================
-
-; Begin chromium edits
-%ifdef CHROMIUM
-; Change the order of registers so we can get the lower 8 bits of the 5th and 6th
-; arguments.
-DECLARE_REG 0, esi, esi, si, null, [esp + stack_offset + 4]
-DECLARE_REG 1, edi, edi, di, null, [esp + stack_offset + 8]
-DECLARE_REG 2, ecx, ecx, cx, cl, [esp + stack_offset + 12]
-DECLARE_REG 3, edx, edx, dx, dl, [esp + stack_offset + 16]
-DECLARE_REG 4, eax, eax, ax, al, [esp + stack_offset + 20]
-DECLARE_REG 5, ebx, ebx, bx, bl, [esp + stack_offset + 24]
-%else
-DECLARE_REG 0, eax, eax, ax, al, [esp + stack_offset + 4]
-DECLARE_REG 1, ecx, ecx, cx, cl, [esp + stack_offset + 8]
-DECLARE_REG 2, edx, edx, dx, dl, [esp + stack_offset + 12]
-DECLARE_REG 3, ebx, ebx, bx, bl, [esp + stack_offset + 16]
-DECLARE_REG 4, esi, esi, si, null, [esp + stack_offset + 20]
-DECLARE_REG 5, edi, edi, di, null, [esp + stack_offset + 24]
-%endif
-; End chromium edits
-DECLARE_REG 6, ebp, ebp, bp, null, [esp + stack_offset + 28]
-%define r7m [esp + stack_offset + 32]
-%define r8m [esp + stack_offset + 36]
-%define rsp esp
-
-%macro PUSH_IF_USED 1 ; reg_id
- %if %1 < regs_used
- push r%1
- %assign stack_offset stack_offset+4
- %endif
-%endmacro
-
-%macro POP_IF_USED 1 ; reg_id
- %if %1 < regs_used
- pop r%1
- %endif
-%endmacro
-
-%macro LOAD_IF_USED 2 ; reg_id, number_of_args
- %if %1 < %2
- mov r%1, [esp + stack_offset + 4 + %1*4]
- %endif
-%endmacro
-
-%macro PROLOGUE 2-4+ ; #args, #regs, #xmm_regs, arg_names...
- ASSERT %2 >= %1
- %assign regs_used %2
- ASSERT regs_used <= 7
-%ifdef CHROMIUM
- PUSH_IF_USED 0
- PUSH_IF_USED 1
- PUSH_IF_USED 5
-%else
- PUSH_IF_USED 3
- PUSH_IF_USED 4
- PUSH_IF_USED 5
-%endif
- PUSH_IF_USED 6
- LOAD_IF_USED 0, %1
- LOAD_IF_USED 1, %1
- LOAD_IF_USED 2, %1
- LOAD_IF_USED 3, %1
- LOAD_IF_USED 4, %1
- LOAD_IF_USED 5, %1
- LOAD_IF_USED 6, %1
- DEFINE_ARGS %4
-%endmacro
-
-%macro RET 0
- POP_IF_USED 6
-%ifdef CHROMIUM
- POP_IF_USED 5
- POP_IF_USED 1
- POP_IF_USED 0
-%else
- POP_IF_USED 5
- POP_IF_USED 4
- POP_IF_USED 3
-%endif
- ret
-%endmacro
-
-%macro REP_RET 0
- %if regs_used > 3
- RET
- %else
- rep ret
- %endif
-%endmacro
-
-%endif ;======================================================================
-
-%ifndef WIN64
-%macro WIN64_SPILL_XMM 1
-%endmacro
-%macro WIN64_RESTORE_XMM 1
-%endmacro
-%endif
-
-
-
-;=============================================================================
-; arch-independent part
-;=============================================================================
-
-%assign function_align 16
-
-; Symbol prefix for C linkage
-%macro cglobal 1-2+
- %xdefine %1 mangle(program_name %+ _ %+ %1)
- %xdefine %1.skip_prologue %1 %+ .skip_prologue
- %ifidn __OUTPUT_FORMAT__,elf
- global %1:function hidden
- %else
- global %1
- %endif
- align function_align
- %1:
- RESET_MM_PERMUTATION ; not really needed, but makes disassembly somewhat nicer
- %assign stack_offset 0
- %if %0 > 1
- PROLOGUE %2
- %endif
-%endmacro
-
-%macro cextern 1
- %xdefine %1 mangle(program_name %+ _ %+ %1)
- extern %1
-%endmacro
-
-;like cextern, but without the prefix
-%macro cextern_naked 1
- %xdefine %1 mangle(%1)
- extern %1
-%endmacro
-
-%macro const 2+
- %xdefine %1 mangle(program_name %+ _ %+ %1)
- global %1
- %1: %2
-%endmacro
-
-; This is needed for ELF, otherwise the GNU linker assumes the stack is
-; executable by default.
-%ifidn __OUTPUT_FORMAT__,elf
-SECTION .note.GNU-stack noalloc noexec nowrite progbits
-%endif
-
-; merge mmx and sse*
-
-%macro CAT_XDEFINE 3
- %xdefine %1%2 %3
-%endmacro
-
-%macro CAT_UNDEF 2
- %undef %1%2
-%endmacro
-
-%macro INIT_MMX 0
- %assign avx_enabled 0
- %define RESET_MM_PERMUTATION INIT_MMX
- %define mmsize 8
- %define num_mmregs 8
- %define mova movq
- %define movu movq
- %define movh movd
- %define movnta movntq
- %assign %%i 0
- %rep 8
- CAT_XDEFINE m, %%i, mm %+ %%i
- CAT_XDEFINE nmm, %%i, %%i
- %assign %%i %%i+1
- %endrep
- %rep 8
- CAT_UNDEF m, %%i
- CAT_UNDEF nmm, %%i
- %assign %%i %%i+1
- %endrep
-%endmacro
-
-%macro INIT_XMM 0
- %assign avx_enabled 0
- %define RESET_MM_PERMUTATION INIT_XMM
- %define mmsize 16
- %define num_mmregs 8
- %ifdef ARCH_X86_64
- %define num_mmregs 16
- %endif
- %define mova movdqa
- %define movu movdqu
- %define movh movq
- %define movnta movntdq
- %assign %%i 0
- %rep num_mmregs
- CAT_XDEFINE m, %%i, xmm %+ %%i
- CAT_XDEFINE nxmm, %%i, %%i
- %assign %%i %%i+1
- %endrep
-%endmacro
-
-%macro INIT_AVX 0
- INIT_XMM
- %assign avx_enabled 1
- %define PALIGNR PALIGNR_SSSE3
- %define RESET_MM_PERMUTATION INIT_AVX
-%endmacro
-
-%macro INIT_YMM 0
- %assign avx_enabled 1
- %define RESET_MM_PERMUTATION INIT_YMM
- %define mmsize 32
- %define num_mmregs 8
- %ifdef ARCH_X86_64
- %define num_mmregs 16
- %endif
- %define mova vmovaps
- %define movu vmovups
- %assign %%i 0
- %rep num_mmregs
- CAT_XDEFINE m, %%i, ymm %+ %%i
- CAT_XDEFINE nymm, %%i, %%i
- %assign %%i %%i+1
- %endrep
-%endmacro
-
-INIT_MMX
-
-; I often want to use macros that permute their arguments. e.g. there's no
-; efficient way to implement butterfly or transpose or dct without swapping some
-; arguments.
-;
-; I would like to not have to manually keep track of the permutations:
-; If I insert a permutation in the middle of a function, it should automatically
-; change everything that follows. For more complex macros I may also have multiple
-; implementations, e.g. the SSE2 and SSSE3 versions may have different permutations.
-;
-; Hence these macros. Insert a PERMUTE or some SWAPs at the end of a macro that
-; permutes its arguments. It's equivalent to exchanging the contents of the
-; registers, except that this way you exchange the register names instead, so it
-; doesn't cost any cycles.
-
-%macro PERMUTE 2-* ; takes a list of pairs to swap
-%rep %0/2
- %xdefine tmp%2 m%2
- %xdefine ntmp%2 nm%2
- %rotate 2
-%endrep
-%rep %0/2
- %xdefine m%1 tmp%2
- %xdefine nm%1 ntmp%2
- %undef tmp%2
- %undef ntmp%2
- %rotate 2
-%endrep
-%endmacro
-
-%macro SWAP 2-* ; swaps a single chain (sometimes more concise than pairs)
-%rep %0-1
-%ifdef m%1
- %xdefine tmp m%1
- %xdefine m%1 m%2
- %xdefine m%2 tmp
- CAT_XDEFINE n, m%1, %1
- CAT_XDEFINE n, m%2, %2
-%else
- ; If we were called as "SWAP m0,m1" rather than "SWAP 0,1" infer the original numbers here.
- ; Be careful using this mode in nested macros though, as in some cases there may be
- ; other copies of m# that have already been dereferenced and don't get updated correctly.
- %xdefine %%n1 n %+ %1
- %xdefine %%n2 n %+ %2
- %xdefine tmp m %+ %%n1
- CAT_XDEFINE m, %%n1, m %+ %%n2
- CAT_XDEFINE m, %%n2, tmp
- CAT_XDEFINE n, m %+ %%n1, %%n1
- CAT_XDEFINE n, m %+ %%n2, %%n2
-%endif
- %undef tmp
- %rotate 1
-%endrep
-%endmacro
-
-; If SAVE_MM_PERMUTATION is placed at the end of a function and given the
-; function name, then any later calls to that function will automatically
-; load the permutation, so values can be returned in mmregs.
-%macro SAVE_MM_PERMUTATION 1 ; name to save as
- %assign %%i 0
- %rep num_mmregs
- CAT_XDEFINE %1_m, %%i, m %+ %%i
- %assign %%i %%i+1
- %endrep
-%endmacro
-
-%macro LOAD_MM_PERMUTATION 1 ; name to load from
- %assign %%i 0
- %rep num_mmregs
- CAT_XDEFINE m, %%i, %1_m %+ %%i
- CAT_XDEFINE n, m %+ %%i, %%i
- %assign %%i %%i+1
- %endrep
-%endmacro
-
-%macro call 1
- call %1
- %ifdef %1_m0
- LOAD_MM_PERMUTATION %1
- %endif
-%endmacro
-
-; Substitutions that reduce instruction size but are functionally equivalent
-%macro add 2
- %ifnum %2
- %if %2==128
- sub %1, -128
- %else
- add %1, %2
- %endif
- %else
- add %1, %2
- %endif
-%endmacro
-
-%macro sub 2
- %ifnum %2
- %if %2==128
- add %1, -128
- %else
- sub %1, %2
- %endif
- %else
- sub %1, %2
- %endif
-%endmacro
-
-;=============================================================================
-; AVX abstraction layer
-;=============================================================================
-
-%assign i 0
-%rep 16
- %if i < 8
- CAT_XDEFINE sizeofmm, i, 8
- %endif
- CAT_XDEFINE sizeofxmm, i, 16
- CAT_XDEFINE sizeofymm, i, 32
-%assign i i+1
-%endrep
-%undef i
-
-;%1 == instruction
-;%2 == 1 if float, 0 if int
-;%3 == 0 if 3-operand (xmm, xmm, xmm), 1 if 4-operand (xmm, xmm, xmm, imm)
-;%4 == number of operands given
-;%5+: operands
-%macro RUN_AVX_INSTR 6-7+
- %if sizeof%5==32
- v%1 %5, %6, %7
- %else
- %if sizeof%5==8
- %define %%regmov movq
- %elif %2
- %define %%regmov movaps
- %else
- %define %%regmov movdqa
- %endif
-
- %if %4>=3+%3
- %ifnidn %5, %6
- %if avx_enabled && sizeof%5==16
- v%1 %5, %6, %7
- %else
- %%regmov %5, %6
- %1 %5, %7
- %endif
- %else
- %1 %5, %7
- %endif
- %elif %3
- %1 %5, %6, %7
- %else
- %1 %5, %6
- %endif
- %endif
-%endmacro
-
-;%1 == instruction
-;%2 == 1 if float, 0 if int
-;%3 == 0 if 3-operand (xmm, xmm, xmm), 1 if 4-operand (xmm, xmm, xmm, imm)
-%macro AVX_INSTR 3
- %macro %1 2-8 fnord, fnord, fnord, %1, %2, %3
- %ifidn %3, fnord
- RUN_AVX_INSTR %6, %7, %8, 2, %1, %2
- %elifidn %4, fnord
- RUN_AVX_INSTR %6, %7, %8, 3, %1, %2, %3
- %elifidn %5, fnord
- RUN_AVX_INSTR %6, %7, %8, 4, %1, %2, %3, %4
- %else
- RUN_AVX_INSTR %6, %7, %8, 5, %1, %2, %3, %4, %5
- %endif
- %endmacro
-%endmacro
-
-AVX_INSTR addpd, 1, 0
-AVX_INSTR addps, 1, 0
-AVX_INSTR addsd, 1, 0
-AVX_INSTR addss, 1, 0
-AVX_INSTR addsubpd, 1, 0
-AVX_INSTR addsubps, 1, 0
-AVX_INSTR andpd, 1, 0
-AVX_INSTR andps, 1, 0
-AVX_INSTR andnpd, 1, 0
-AVX_INSTR andnps, 1, 0
-AVX_INSTR blendpd, 1, 0
-AVX_INSTR blendps, 1, 0
-AVX_INSTR blendvpd, 1, 0
-AVX_INSTR blendvps, 1, 0
-AVX_INSTR cmppd, 1, 0
-AVX_INSTR cmpps, 1, 0
-AVX_INSTR cmpsd, 1, 0
-AVX_INSTR cmpss, 1, 0
-AVX_INSTR divpd, 1, 0
-AVX_INSTR divps, 1, 0
-AVX_INSTR divsd, 1, 0
-AVX_INSTR divss, 1, 0
-AVX_INSTR dppd, 1, 0
-AVX_INSTR dpps, 1, 0
-AVX_INSTR haddpd, 1, 0
-AVX_INSTR haddps, 1, 0
-AVX_INSTR hsubpd, 1, 0
-AVX_INSTR hsubps, 1, 0
-AVX_INSTR maxpd, 1, 0
-AVX_INSTR maxps, 1, 0
-AVX_INSTR maxsd, 1, 0
-AVX_INSTR maxss, 1, 0
-AVX_INSTR minpd, 1, 0
-AVX_INSTR minps, 1, 0
-AVX_INSTR minsd, 1, 0
-AVX_INSTR minss, 1, 0
-AVX_INSTR mpsadbw, 0, 1
-AVX_INSTR mulpd, 1, 0
-AVX_INSTR mulps, 1, 0
-AVX_INSTR mulsd, 1, 0
-AVX_INSTR mulss, 1, 0
-AVX_INSTR orpd, 1, 0
-AVX_INSTR orps, 1, 0
-AVX_INSTR packsswb, 0, 0
-AVX_INSTR packssdw, 0, 0
-AVX_INSTR packuswb, 0, 0
-AVX_INSTR packusdw, 0, 0
-AVX_INSTR paddb, 0, 0
-AVX_INSTR paddw, 0, 0
-AVX_INSTR paddd, 0, 0
-AVX_INSTR paddq, 0, 0
-AVX_INSTR paddsb, 0, 0
-AVX_INSTR paddsw, 0, 0
-AVX_INSTR paddusb, 0, 0
-AVX_INSTR paddusw, 0, 0
-AVX_INSTR palignr, 0, 1
-AVX_INSTR pand, 0, 0
-AVX_INSTR pandn, 0, 0
-AVX_INSTR pavgb, 0, 0
-AVX_INSTR pavgw, 0, 0
-AVX_INSTR pblendvb, 0, 0
-AVX_INSTR pblendw, 0, 1
-AVX_INSTR pcmpestri, 0, 0
-AVX_INSTR pcmpestrm, 0, 0
-AVX_INSTR pcmpistri, 0, 0
-AVX_INSTR pcmpistrm, 0, 0
-AVX_INSTR pcmpeqb, 0, 0
-AVX_INSTR pcmpeqw, 0, 0
-AVX_INSTR pcmpeqd, 0, 0
-AVX_INSTR pcmpeqq, 0, 0
-AVX_INSTR pcmpgtb, 0, 0
-AVX_INSTR pcmpgtw, 0, 0
-AVX_INSTR pcmpgtd, 0, 0
-AVX_INSTR pcmpgtq, 0, 0
-AVX_INSTR phaddw, 0, 0
-AVX_INSTR phaddd, 0, 0
-AVX_INSTR phaddsw, 0, 0
-AVX_INSTR phsubw, 0, 0
-AVX_INSTR phsubd, 0, 0
-AVX_INSTR phsubsw, 0, 0
-AVX_INSTR pmaddwd, 0, 0
-AVX_INSTR pmaddubsw, 0, 0
-AVX_INSTR pmaxsb, 0, 0
-AVX_INSTR pmaxsw, 0, 0
-AVX_INSTR pmaxsd, 0, 0
-AVX_INSTR pmaxub, 0, 0
-AVX_INSTR pmaxuw, 0, 0
-AVX_INSTR pmaxud, 0, 0
-AVX_INSTR pminsb, 0, 0
-AVX_INSTR pminsw, 0, 0
-AVX_INSTR pminsd, 0, 0
-AVX_INSTR pminub, 0, 0
-AVX_INSTR pminuw, 0, 0
-AVX_INSTR pminud, 0, 0
-AVX_INSTR pmulhuw, 0, 0
-AVX_INSTR pmulhrsw, 0, 0
-AVX_INSTR pmulhw, 0, 0
-AVX_INSTR pmullw, 0, 0
-AVX_INSTR pmulld, 0, 0
-AVX_INSTR pmuludq, 0, 0
-AVX_INSTR pmuldq, 0, 0
-AVX_INSTR por, 0, 0
-AVX_INSTR psadbw, 0, 0
-AVX_INSTR pshufb, 0, 0
-AVX_INSTR psignb, 0, 0
-AVX_INSTR psignw, 0, 0
-AVX_INSTR psignd, 0, 0
-AVX_INSTR psllw, 0, 0
-AVX_INSTR pslld, 0, 0
-AVX_INSTR psllq, 0, 0
-AVX_INSTR pslldq, 0, 0
-AVX_INSTR psraw, 0, 0
-AVX_INSTR psrad, 0, 0
-AVX_INSTR psrlw, 0, 0
-AVX_INSTR psrld, 0, 0
-AVX_INSTR psrlq, 0, 0
-AVX_INSTR psrldq, 0, 0
-AVX_INSTR psubb, 0, 0
-AVX_INSTR psubw, 0, 0
-AVX_INSTR psubd, 0, 0
-AVX_INSTR psubq, 0, 0
-AVX_INSTR psubsb, 0, 0
-AVX_INSTR psubsw, 0, 0
-AVX_INSTR psubusb, 0, 0
-AVX_INSTR psubusw, 0, 0
-AVX_INSTR punpckhbw, 0, 0
-AVX_INSTR punpckhwd, 0, 0
-AVX_INSTR punpckhdq, 0, 0
-AVX_INSTR punpckhqdq, 0, 0
-AVX_INSTR punpcklbw, 0, 0
-AVX_INSTR punpcklwd, 0, 0
-AVX_INSTR punpckldq, 0, 0
-AVX_INSTR punpcklqdq, 0, 0
-AVX_INSTR pxor, 0, 0
-AVX_INSTR shufps, 0, 1
-AVX_INSTR subpd, 1, 0
-AVX_INSTR subps, 1, 0
-AVX_INSTR subsd, 1, 0
-AVX_INSTR subss, 1, 0
-AVX_INSTR unpckhpd, 1, 0
-AVX_INSTR unpckhps, 1, 0
-AVX_INSTR unpcklpd, 1, 0
-AVX_INSTR unpcklps, 1, 0
-AVX_INSTR xorpd, 1, 0
-AVX_INSTR xorps, 1, 0
-
-; 3DNow instructions, for sharing code between AVX, SSE and 3DN
-AVX_INSTR pfadd, 1, 0
-AVX_INSTR pfsub, 1, 0
-AVX_INSTR pfmul, 1, 0
-
-;=============================================================================
-; Chromium extensions
-;=============================================================================
-
-%ifdef CHROMIUM
-; Always build PIC code on Mac or Linux for Chromium.
-%ifdef MACHO
-%define PIC
-%endif
-%ifdef ELF
-%define PIC
-%endif
-
-;
-; LOAD_SYM %1 (reg), %2 (sym)
-; Copies the address of a local symbol to the specified register.
-;
-
-%macro LOAD_SYM 2
-
-%ifdef PIC
- call %%geteip
- add %1, %2 - $
- jmp %%end
-%%geteip:
- mov %1, [rsp]
- ret
-%%end:
-
-%else
- lea %1, [%2]
-%endif
-
-%endmacro
-
-;
-; MOVq %1 (xmm), %2 (reg)
-; MOVq %1 (reg), %2 (xmm)
-; Copies a general-purpose register to an XMM register, and vice versa.
-;
-%macro MOVq 2
-%if gprsize == 8
- movq %1, %2
-%else
- movd %1, %2
-%endif
-%endmacro
-
-;
-; PRIVATE
-; A flag indicating that the specified symbol is a private symbol. This define adds
-; a hidden flag on Linux and a private_extern flag on Mac. (We can use this
-; private_extern flag only on the latest yasm.)
-;
-%ifdef MACHO
-%define PRIVATE :private_extern
-%elifdef ELF
-%define PRIVATE :hidden
-%else
-%define PRIVATE
-%endif
-
-%endif ; CHROMIUM
-
-%endif ; MEDIA_BASE_SIMD_X86INC_ASM_
diff --git a/src/media/base/simd/yuv_to_rgb_table.cc b/src/media/base/simd/yuv_to_rgb_table.cc
deleted file mode 100644
index f998e85..0000000
--- a/src/media/base/simd/yuv_to_rgb_table.cc
+++ /dev/null
@@ -1,233 +0,0 @@
-// Copyright (c) 2010 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "media/base/simd/yuv_to_rgb_table.h"
-
-extern "C" {
-
-#define RGBY(i) { \
- static_cast<int16>(1.164 * 64 * (i - 16) + 0.5), \
- static_cast<int16>(1.164 * 64 * (i - 16) + 0.5), \
- static_cast<int16>(1.164 * 64 * (i - 16) + 0.5), \
- 0 \
-}
-
-#define RGBU(i) { \
- static_cast<int16>(2.018 * 64 * (i - 128) + 0.5), \
- static_cast<int16>(-0.391 * 64 * (i - 128) + 0.5), \
- 0, \
- static_cast<int16>(256 * 64 - 1) \
-}
-
-#define RGBV(i) { \
- 0, \
- static_cast<int16>(-0.813 * 64 * (i - 128) + 0.5), \
- static_cast<int16>(1.596 * 64 * (i - 128) + 0.5), \
- 0 \
-}
-
-SIMD_ALIGNED(int16 kCoefficientsRgbY[256 * 3][4]) = {
- RGBY(0x00), RGBY(0x01), RGBY(0x02), RGBY(0x03),
- RGBY(0x04), RGBY(0x05), RGBY(0x06), RGBY(0x07),
- RGBY(0x08), RGBY(0x09), RGBY(0x0A), RGBY(0x0B),
- RGBY(0x0C), RGBY(0x0D), RGBY(0x0E), RGBY(0x0F),
- RGBY(0x10), RGBY(0x11), RGBY(0x12), RGBY(0x13),
- RGBY(0x14), RGBY(0x15), RGBY(0x16), RGBY(0x17),
- RGBY(0x18), RGBY(0x19), RGBY(0x1A), RGBY(0x1B),
- RGBY(0x1C), RGBY(0x1D), RGBY(0x1E), RGBY(0x1F),
- RGBY(0x20), RGBY(0x21), RGBY(0x22), RGBY(0x23),
- RGBY(0x24), RGBY(0x25), RGBY(0x26), RGBY(0x27),
- RGBY(0x28), RGBY(0x29), RGBY(0x2A), RGBY(0x2B),
- RGBY(0x2C), RGBY(0x2D), RGBY(0x2E), RGBY(0x2F),
- RGBY(0x30), RGBY(0x31), RGBY(0x32), RGBY(0x33),
- RGBY(0x34), RGBY(0x35), RGBY(0x36), RGBY(0x37),
- RGBY(0x38), RGBY(0x39), RGBY(0x3A), RGBY(0x3B),
- RGBY(0x3C), RGBY(0x3D), RGBY(0x3E), RGBY(0x3F),
- RGBY(0x40), RGBY(0x41), RGBY(0x42), RGBY(0x43),
- RGBY(0x44), RGBY(0x45), RGBY(0x46), RGBY(0x47),
- RGBY(0x48), RGBY(0x49), RGBY(0x4A), RGBY(0x4B),
- RGBY(0x4C), RGBY(0x4D), RGBY(0x4E), RGBY(0x4F),
- RGBY(0x50), RGBY(0x51), RGBY(0x52), RGBY(0x53),
- RGBY(0x54), RGBY(0x55), RGBY(0x56), RGBY(0x57),
- RGBY(0x58), RGBY(0x59), RGBY(0x5A), RGBY(0x5B),
- RGBY(0x5C), RGBY(0x5D), RGBY(0x5E), RGBY(0x5F),
- RGBY(0x60), RGBY(0x61), RGBY(0x62), RGBY(0x63),
- RGBY(0x64), RGBY(0x65), RGBY(0x66), RGBY(0x67),
- RGBY(0x68), RGBY(0x69), RGBY(0x6A), RGBY(0x6B),
- RGBY(0x6C), RGBY(0x6D), RGBY(0x6E), RGBY(0x6F),
- RGBY(0x70), RGBY(0x71), RGBY(0x72), RGBY(0x73),
- RGBY(0x74), RGBY(0x75), RGBY(0x76), RGBY(0x77),
- RGBY(0x78), RGBY(0x79), RGBY(0x7A), RGBY(0x7B),
- RGBY(0x7C), RGBY(0x7D), RGBY(0x7E), RGBY(0x7F),
- RGBY(0x80), RGBY(0x81), RGBY(0x82), RGBY(0x83),
- RGBY(0x84), RGBY(0x85), RGBY(0x86), RGBY(0x87),
- RGBY(0x88), RGBY(0x89), RGBY(0x8A), RGBY(0x8B),
- RGBY(0x8C), RGBY(0x8D), RGBY(0x8E), RGBY(0x8F),
- RGBY(0x90), RGBY(0x91), RGBY(0x92), RGBY(0x93),
- RGBY(0x94), RGBY(0x95), RGBY(0x96), RGBY(0x97),
- RGBY(0x98), RGBY(0x99), RGBY(0x9A), RGBY(0x9B),
- RGBY(0x9C), RGBY(0x9D), RGBY(0x9E), RGBY(0x9F),
- RGBY(0xA0), RGBY(0xA1), RGBY(0xA2), RGBY(0xA3),
- RGBY(0xA4), RGBY(0xA5), RGBY(0xA6), RGBY(0xA7),
- RGBY(0xA8), RGBY(0xA9), RGBY(0xAA), RGBY(0xAB),
- RGBY(0xAC), RGBY(0xAD), RGBY(0xAE), RGBY(0xAF),
- RGBY(0xB0), RGBY(0xB1), RGBY(0xB2), RGBY(0xB3),
- RGBY(0xB4), RGBY(0xB5), RGBY(0xB6), RGBY(0xB7),
- RGBY(0xB8), RGBY(0xB9), RGBY(0xBA), RGBY(0xBB),
- RGBY(0xBC), RGBY(0xBD), RGBY(0xBE), RGBY(0xBF),
- RGBY(0xC0), RGBY(0xC1), RGBY(0xC2), RGBY(0xC3),
- RGBY(0xC4), RGBY(0xC5), RGBY(0xC6), RGBY(0xC7),
- RGBY(0xC8), RGBY(0xC9), RGBY(0xCA), RGBY(0xCB),
- RGBY(0xCC), RGBY(0xCD), RGBY(0xCE), RGBY(0xCF),
- RGBY(0xD0), RGBY(0xD1), RGBY(0xD2), RGBY(0xD3),
- RGBY(0xD4), RGBY(0xD5), RGBY(0xD6), RGBY(0xD7),
- RGBY(0xD8), RGBY(0xD9), RGBY(0xDA), RGBY(0xDB),
- RGBY(0xDC), RGBY(0xDD), RGBY(0xDE), RGBY(0xDF),
- RGBY(0xE0), RGBY(0xE1), RGBY(0xE2), RGBY(0xE3),
- RGBY(0xE4), RGBY(0xE5), RGBY(0xE6), RGBY(0xE7),
- RGBY(0xE8), RGBY(0xE9), RGBY(0xEA), RGBY(0xEB),
- RGBY(0xEC), RGBY(0xED), RGBY(0xEE), RGBY(0xEF),
- RGBY(0xF0), RGBY(0xF1), RGBY(0xF2), RGBY(0xF3),
- RGBY(0xF4), RGBY(0xF5), RGBY(0xF6), RGBY(0xF7),
- RGBY(0xF8), RGBY(0xF9), RGBY(0xFA), RGBY(0xFB),
- RGBY(0xFC), RGBY(0xFD), RGBY(0xFE), RGBY(0xFF),
-
- // Chroma U table.
- RGBU(0x00), RGBU(0x01), RGBU(0x02), RGBU(0x03),
- RGBU(0x04), RGBU(0x05), RGBU(0x06), RGBU(0x07),
- RGBU(0x08), RGBU(0x09), RGBU(0x0A), RGBU(0x0B),
- RGBU(0x0C), RGBU(0x0D), RGBU(0x0E), RGBU(0x0F),
- RGBU(0x10), RGBU(0x11), RGBU(0x12), RGBU(0x13),
- RGBU(0x14), RGBU(0x15), RGBU(0x16), RGBU(0x17),
- RGBU(0x18), RGBU(0x19), RGBU(0x1A), RGBU(0x1B),
- RGBU(0x1C), RGBU(0x1D), RGBU(0x1E), RGBU(0x1F),
- RGBU(0x20), RGBU(0x21), RGBU(0x22), RGBU(0x23),
- RGBU(0x24), RGBU(0x25), RGBU(0x26), RGBU(0x27),
- RGBU(0x28), RGBU(0x29), RGBU(0x2A), RGBU(0x2B),
- RGBU(0x2C), RGBU(0x2D), RGBU(0x2E), RGBU(0x2F),
- RGBU(0x30), RGBU(0x31), RGBU(0x32), RGBU(0x33),
- RGBU(0x34), RGBU(0x35), RGBU(0x36), RGBU(0x37),
- RGBU(0x38), RGBU(0x39), RGBU(0x3A), RGBU(0x3B),
- RGBU(0x3C), RGBU(0x3D), RGBU(0x3E), RGBU(0x3F),
- RGBU(0x40), RGBU(0x41), RGBU(0x42), RGBU(0x43),
- RGBU(0x44), RGBU(0x45), RGBU(0x46), RGBU(0x47),
- RGBU(0x48), RGBU(0x49), RGBU(0x4A), RGBU(0x4B),
- RGBU(0x4C), RGBU(0x4D), RGBU(0x4E), RGBU(0x4F),
- RGBU(0x50), RGBU(0x51), RGBU(0x52), RGBU(0x53),
- RGBU(0x54), RGBU(0x55), RGBU(0x56), RGBU(0x57),
- RGBU(0x58), RGBU(0x59), RGBU(0x5A), RGBU(0x5B),
- RGBU(0x5C), RGBU(0x5D), RGBU(0x5E), RGBU(0x5F),
- RGBU(0x60), RGBU(0x61), RGBU(0x62), RGBU(0x63),
- RGBU(0x64), RGBU(0x65), RGBU(0x66), RGBU(0x67),
- RGBU(0x68), RGBU(0x69), RGBU(0x6A), RGBU(0x6B),
- RGBU(0x6C), RGBU(0x6D), RGBU(0x6E), RGBU(0x6F),
- RGBU(0x70), RGBU(0x71), RGBU(0x72), RGBU(0x73),
- RGBU(0x74), RGBU(0x75), RGBU(0x76), RGBU(0x77),
- RGBU(0x78), RGBU(0x79), RGBU(0x7A), RGBU(0x7B),
- RGBU(0x7C), RGBU(0x7D), RGBU(0x7E), RGBU(0x7F),
- RGBU(0x80), RGBU(0x81), RGBU(0x82), RGBU(0x83),
- RGBU(0x84), RGBU(0x85), RGBU(0x86), RGBU(0x87),
- RGBU(0x88), RGBU(0x89), RGBU(0x8A), RGBU(0x8B),
- RGBU(0x8C), RGBU(0x8D), RGBU(0x8E), RGBU(0x8F),
- RGBU(0x90), RGBU(0x91), RGBU(0x92), RGBU(0x93),
- RGBU(0x94), RGBU(0x95), RGBU(0x96), RGBU(0x97),
- RGBU(0x98), RGBU(0x99), RGBU(0x9A), RGBU(0x9B),
- RGBU(0x9C), RGBU(0x9D), RGBU(0x9E), RGBU(0x9F),
- RGBU(0xA0), RGBU(0xA1), RGBU(0xA2), RGBU(0xA3),
- RGBU(0xA4), RGBU(0xA5), RGBU(0xA6), RGBU(0xA7),
- RGBU(0xA8), RGBU(0xA9), RGBU(0xAA), RGBU(0xAB),
- RGBU(0xAC), RGBU(0xAD), RGBU(0xAE), RGBU(0xAF),
- RGBU(0xB0), RGBU(0xB1), RGBU(0xB2), RGBU(0xB3),
- RGBU(0xB4), RGBU(0xB5), RGBU(0xB6), RGBU(0xB7),
- RGBU(0xB8), RGBU(0xB9), RGBU(0xBA), RGBU(0xBB),
- RGBU(0xBC), RGBU(0xBD), RGBU(0xBE), RGBU(0xBF),
- RGBU(0xC0), RGBU(0xC1), RGBU(0xC2), RGBU(0xC3),
- RGBU(0xC4), RGBU(0xC5), RGBU(0xC6), RGBU(0xC7),
- RGBU(0xC8), RGBU(0xC9), RGBU(0xCA), RGBU(0xCB),
- RGBU(0xCC), RGBU(0xCD), RGBU(0xCE), RGBU(0xCF),
- RGBU(0xD0), RGBU(0xD1), RGBU(0xD2), RGBU(0xD3),
- RGBU(0xD4), RGBU(0xD5), RGBU(0xD6), RGBU(0xD7),
- RGBU(0xD8), RGBU(0xD9), RGBU(0xDA), RGBU(0xDB),
- RGBU(0xDC), RGBU(0xDD), RGBU(0xDE), RGBU(0xDF),
- RGBU(0xE0), RGBU(0xE1), RGBU(0xE2), RGBU(0xE3),
- RGBU(0xE4), RGBU(0xE5), RGBU(0xE6), RGBU(0xE7),
- RGBU(0xE8), RGBU(0xE9), RGBU(0xEA), RGBU(0xEB),
- RGBU(0xEC), RGBU(0xED), RGBU(0xEE), RGBU(0xEF),
- RGBU(0xF0), RGBU(0xF1), RGBU(0xF2), RGBU(0xF3),
- RGBU(0xF4), RGBU(0xF5), RGBU(0xF6), RGBU(0xF7),
- RGBU(0xF8), RGBU(0xF9), RGBU(0xFA), RGBU(0xFB),
- RGBU(0xFC), RGBU(0xFD), RGBU(0xFE), RGBU(0xFF),
-
- // Chroma V table.
- RGBV(0x00), RGBV(0x01), RGBV(0x02), RGBV(0x03),
- RGBV(0x04), RGBV(0x05), RGBV(0x06), RGBV(0x07),
- RGBV(0x08), RGBV(0x09), RGBV(0x0A), RGBV(0x0B),
- RGBV(0x0C), RGBV(0x0D), RGBV(0x0E), RGBV(0x0F),
- RGBV(0x10), RGBV(0x11), RGBV(0x12), RGBV(0x13),
- RGBV(0x14), RGBV(0x15), RGBV(0x16), RGBV(0x17),
- RGBV(0x18), RGBV(0x19), RGBV(0x1A), RGBV(0x1B),
- RGBV(0x1C), RGBV(0x1D), RGBV(0x1E), RGBV(0x1F),
- RGBV(0x20), RGBV(0x21), RGBV(0x22), RGBV(0x23),
- RGBV(0x24), RGBV(0x25), RGBV(0x26), RGBV(0x27),
- RGBV(0x28), RGBV(0x29), RGBV(0x2A), RGBV(0x2B),
- RGBV(0x2C), RGBV(0x2D), RGBV(0x2E), RGBV(0x2F),
- RGBV(0x30), RGBV(0x31), RGBV(0x32), RGBV(0x33),
- RGBV(0x34), RGBV(0x35), RGBV(0x36), RGBV(0x37),
- RGBV(0x38), RGBV(0x39), RGBV(0x3A), RGBV(0x3B),
- RGBV(0x3C), RGBV(0x3D), RGBV(0x3E), RGBV(0x3F),
- RGBV(0x40), RGBV(0x41), RGBV(0x42), RGBV(0x43),
- RGBV(0x44), RGBV(0x45), RGBV(0x46), RGBV(0x47),
- RGBV(0x48), RGBV(0x49), RGBV(0x4A), RGBV(0x4B),
- RGBV(0x4C), RGBV(0x4D), RGBV(0x4E), RGBV(0x4F),
- RGBV(0x50), RGBV(0x51), RGBV(0x52), RGBV(0x53),
- RGBV(0x54), RGBV(0x55), RGBV(0x56), RGBV(0x57),
- RGBV(0x58), RGBV(0x59), RGBV(0x5A), RGBV(0x5B),
- RGBV(0x5C), RGBV(0x5D), RGBV(0x5E), RGBV(0x5F),
- RGBV(0x60), RGBV(0x61), RGBV(0x62), RGBV(0x63),
- RGBV(0x64), RGBV(0x65), RGBV(0x66), RGBV(0x67),
- RGBV(0x68), RGBV(0x69), RGBV(0x6A), RGBV(0x6B),
- RGBV(0x6C), RGBV(0x6D), RGBV(0x6E), RGBV(0x6F),
- RGBV(0x70), RGBV(0x71), RGBV(0x72), RGBV(0x73),
- RGBV(0x74), RGBV(0x75), RGBV(0x76), RGBV(0x77),
- RGBV(0x78), RGBV(0x79), RGBV(0x7A), RGBV(0x7B),
- RGBV(0x7C), RGBV(0x7D), RGBV(0x7E), RGBV(0x7F),
- RGBV(0x80), RGBV(0x81), RGBV(0x82), RGBV(0x83),
- RGBV(0x84), RGBV(0x85), RGBV(0x86), RGBV(0x87),
- RGBV(0x88), RGBV(0x89), RGBV(0x8A), RGBV(0x8B),
- RGBV(0x8C), RGBV(0x8D), RGBV(0x8E), RGBV(0x8F),
- RGBV(0x90), RGBV(0x91), RGBV(0x92), RGBV(0x93),
- RGBV(0x94), RGBV(0x95), RGBV(0x96), RGBV(0x97),
- RGBV(0x98), RGBV(0x99), RGBV(0x9A), RGBV(0x9B),
- RGBV(0x9C), RGBV(0x9D), RGBV(0x9E), RGBV(0x9F),
- RGBV(0xA0), RGBV(0xA1), RGBV(0xA2), RGBV(0xA3),
- RGBV(0xA4), RGBV(0xA5), RGBV(0xA6), RGBV(0xA7),
- RGBV(0xA8), RGBV(0xA9), RGBV(0xAA), RGBV(0xAB),
- RGBV(0xAC), RGBV(0xAD), RGBV(0xAE), RGBV(0xAF),
- RGBV(0xB0), RGBV(0xB1), RGBV(0xB2), RGBV(0xB3),
- RGBV(0xB4), RGBV(0xB5), RGBV(0xB6), RGBV(0xB7),
- RGBV(0xB8), RGBV(0xB9), RGBV(0xBA), RGBV(0xBB),
- RGBV(0xBC), RGBV(0xBD), RGBV(0xBE), RGBV(0xBF),
- RGBV(0xC0), RGBV(0xC1), RGBV(0xC2), RGBV(0xC3),
- RGBV(0xC4), RGBV(0xC5), RGBV(0xC6), RGBV(0xC7),
- RGBV(0xC8), RGBV(0xC9), RGBV(0xCA), RGBV(0xCB),
- RGBV(0xCC), RGBV(0xCD), RGBV(0xCE), RGBV(0xCF),
- RGBV(0xD0), RGBV(0xD1), RGBV(0xD2), RGBV(0xD3),
- RGBV(0xD4), RGBV(0xD5), RGBV(0xD6), RGBV(0xD7),
- RGBV(0xD8), RGBV(0xD9), RGBV(0xDA), RGBV(0xDB),
- RGBV(0xDC), RGBV(0xDD), RGBV(0xDE), RGBV(0xDF),
- RGBV(0xE0), RGBV(0xE1), RGBV(0xE2), RGBV(0xE3),
- RGBV(0xE4), RGBV(0xE5), RGBV(0xE6), RGBV(0xE7),
- RGBV(0xE8), RGBV(0xE9), RGBV(0xEA), RGBV(0xEB),
- RGBV(0xEC), RGBV(0xED), RGBV(0xEE), RGBV(0xEF),
- RGBV(0xF0), RGBV(0xF1), RGBV(0xF2), RGBV(0xF3),
- RGBV(0xF4), RGBV(0xF5), RGBV(0xF6), RGBV(0xF7),
- RGBV(0xF8), RGBV(0xF9), RGBV(0xFA), RGBV(0xFB),
- RGBV(0xFC), RGBV(0xFD), RGBV(0xFE), RGBV(0xFF),
-};
-
-#undef RGBY
-#undef RGBU
-#undef RGBV
-
-} // extern "C"
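For reference, the kCoefficientsRgbY table deleted above packs the BT.601 conversion coefficients as fixed point scaled by 64, with each entry laid out as {B, G, R, A} (see the RGBY/RGBU/RGBV macros). A single pixel can then be converted with three table lookups, an add per channel, and a shift. The following scalar sketch illustrates that use of the table; the function and helper names are illustrative and not part of the deleted sources, which do the same math with MMX/SSE on several pixels at a time using saturating adds.

#include <stdint.h>

// Layout of the deleted table: rows 0-255 hold the Y contribution,
// rows 256-511 the U contribution, rows 512-767 the V contribution;
// each row is {B, G, R, A} scaled by 64.
extern "C" int16_t kCoefficientsRgbY[768][4];

static inline uint8_t ClampTo8(int value) {
  return value < 0 ? 0 : (value > 255 ? 255 : static_cast<uint8_t>(value));
}

// Converts one BT.601 (video range) YUV pixel to 32-bit BGRA.
uint32_t ConvertYuvPixelToBgra(uint8_t y, uint8_t u, uint8_t v) {
  const int16_t* y_row = kCoefficientsRgbY[y];
  const int16_t* u_row = kCoefficientsRgbY[256 + u];
  const int16_t* v_row = kCoefficientsRgbY[512 + v];

  // Sum the three contributions per channel and undo the x64 scaling.
  int b = (y_row[0] + u_row[0] + v_row[0]) >> 6;
  int g = (y_row[1] + u_row[1] + v_row[1]) >> 6;
  int r = (y_row[2] + u_row[2] + v_row[2]) >> 6;

  return (0xFFu << 24) | (ClampTo8(r) << 16) |
         (ClampTo8(g) << 8) | ClampTo8(b);
}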
diff --git a/src/media/base/simd/yuv_to_rgb_table.h b/src/media/base/simd/yuv_to_rgb_table.h
deleted file mode 100644
index 0c43a7a..0000000
--- a/src/media/base/simd/yuv_to_rgb_table.h
+++ /dev/null
@@ -1,26 +0,0 @@
-// Copyright (c) 2011 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// Defines the conversion table from YUV to RGB.
-
-#ifndef MEDIA_BASE_SIMD_YUV_TO_RGB_TABLE_H_
-#define MEDIA_BASE_SIMD_YUV_TO_RGB_TABLE_H_
-
-#include "base/basictypes.h"
-#include "build/build_config.h"
-
-extern "C" {
-
-#if defined(COMPILER_MSVC)
-#define SIMD_ALIGNED(var) __declspec(align(16)) var
-#else
-#define SIMD_ALIGNED(var) var __attribute__((aligned(16)))
-#endif
-
-// Align the table to 16 bytes to allow faster reads.
-extern SIMD_ALIGNED(int16 kCoefficientsRgbY[768][4]);
-
-} // extern "C"
-
-#endif // MEDIA_BASE_SIMD_YUV_TO_RGB_TABLE_H_
diff --git a/src/media/base/sinc_resampler.cc b/src/media/base/sinc_resampler.cc
deleted file mode 100644
index d104a1c..0000000
--- a/src/media/base/sinc_resampler.cc
+++ /dev/null
@@ -1,344 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-//
-// Input buffer layout, dividing the total buffer into regions (r0_ - r5_):
-//
-// |----------------|-----------------------------------------|----------------|
-//
-// kBlockSize + kKernelSize / 2
-// <--------------------------------------------------------->
-// r0_
-//
-// kKernelSize / 2 kKernelSize / 2 kKernelSize / 2 kKernelSize / 2
-// <---------------> <---------------> <---------------> <--------------->
-// r1_ r2_ r3_ r4_
-//
-// kBlockSize
-// <--------------------------------------->
-// r5_
-//
-// The algorithm:
-//
-// 1) Consume input frames into r0_ (r1_ is zero-initialized).
-// 2) Position kernel centered at start of r0_ (r2_) and generate output frames
-// until kernel is centered at start of r4_ or we've finished generating all
-// the output frames.
-// 3) Copy r3_ to r1_ and r4_ to r2_.
-// 4) Consume input frames into r5_ (zero-pad if we run out of input).
-// 5) Goto (2) until all of input is consumed.
-//
-// Note: we're glossing over how the sub-sample handling works with
-// |virtual_source_idx_|, etc.
-
-#include "media/base/sinc_resampler.h"
-
-#include <cmath>
-
-#include "base/cpu.h"
-#include "base/logging.h"
-#include "build/build_config.h"
-
-#if defined(ARCH_CPU_X86_FAMILY) && defined(__SSE__)
-#include <xmmintrin.h>
-#endif
-
-#if defined(ARCH_CPU_ARM_FAMILY) && defined(USE_NEON)
-#include <arm_neon.h>
-#endif
-
-namespace media {
-
-namespace {
-
-enum {
- // The kernel size can be adjusted for quality (higher is better) at the
- // expense of performance. Must be a multiple of 32.
- // TODO(dalecurtis): Test performance to see if we can jack this up to 64+.
- kKernelSize = 32,
-
- // The number of destination frames generated per processing pass. Affects
- // how often and for how much SincResampler calls back for input. Must be
- // greater than kKernelSize.
- kBlockSize = 512,
-
- // The kernel offset count is used for interpolation and is the number of
- // sub-sample kernel shifts. Can be adjusted for quality (higher is better)
- // at the expense of allocating more memory.
- kKernelOffsetCount = 32,
- kKernelStorageSize = kKernelSize * (kKernelOffsetCount + 1),
-
- // The size (in samples) of the internal buffer used by the resampler.
- kBufferSize = kBlockSize + kKernelSize
-};
-
-} // namespace
-
-const int SincResampler::kMaximumLookAheadSize = kBufferSize;
-
-SincResampler::SincResampler(double io_sample_rate_ratio, const ReadCB& read_cb)
- : io_sample_rate_ratio_(io_sample_rate_ratio),
- virtual_source_idx_(0),
- buffer_primed_(false),
- read_cb_(read_cb),
- // Create input buffers with a 16-byte alignment for SSE optimizations.
- kernel_storage_(static_cast<float*>(
- base::AlignedAlloc(sizeof(float) * kKernelStorageSize, 16))),
- input_buffer_(static_cast<float*>(
- base::AlignedAlloc(sizeof(float) * kBufferSize, 16))),
- // Setup various region pointers in the buffer (see diagram above).
- r0_(input_buffer_.get() + kKernelSize / 2),
- r1_(input_buffer_.get()),
- r2_(r0_),
- r3_(r0_ + kBlockSize - kKernelSize / 2),
- r4_(r0_ + kBlockSize),
- r5_(r0_ + kKernelSize / 2) {
- // Ensure kKernelSize is a multiple of 32 for easy SSE optimizations; causes
- // r0_ and r5_ (used for input) to always be 16-byte aligned by virtue of
- // input_buffer_ being 16-byte aligned.
- DCHECK_EQ(kKernelSize % 32, 0) << "kKernelSize must be a multiple of 32!";
- DCHECK_GT(kBlockSize, kKernelSize)
- << "kBlockSize must be greater than kKernelSize!";
- // Basic sanity checks to ensure buffer regions are laid out correctly:
- // r0_ and r2_ should always be the same position.
- DCHECK_EQ(r0_, r2_);
- // r1_ at the beginning of the buffer.
- DCHECK_EQ(r1_, input_buffer_.get());
- // r1_ left of r2_, r2_ left of r5_ and r1_, r2_ size correct.
- DCHECK_EQ(r2_ - r1_, r5_ - r2_);
- // r3_ left of r4_, r5_ left of r0_ and r3_ size correct.
- DCHECK_EQ(r4_ - r3_, r5_ - r0_);
- // r3_, r4_ size correct and r4_ at the end of the buffer.
- DCHECK_EQ(r4_ + (r4_ - r3_), r1_ + kBufferSize);
- // r5_ size correct and at the end of the buffer.
- DCHECK_EQ(r5_ + kBlockSize, r1_ + kBufferSize);
-
- memset(kernel_storage_.get(), 0,
- sizeof(*kernel_storage_.get()) * kKernelStorageSize);
- memset(input_buffer_.get(), 0, sizeof(*input_buffer_.get()) * kBufferSize);
-
- InitializeKernel();
-}
-
-SincResampler::~SincResampler() {}
-
-void SincResampler::InitializeKernel() {
- // Blackman window parameters.
- static const double kAlpha = 0.16;
- static const double kA0 = 0.5 * (1.0 - kAlpha);
- static const double kA1 = 0.5;
- static const double kA2 = 0.5 * kAlpha;
-
- // |sinc_scale_factor| is basically the normalized cutoff frequency of the
- // low-pass filter.
- double sinc_scale_factor =
- io_sample_rate_ratio_ > 1.0 ? 1.0 / io_sample_rate_ratio_ : 1.0;
-
-  // The sinc function is an idealized brick-wall filter, but since we're
-  // windowing it, the transition from passband to stopband does not happen
-  // right away. So we adjust the low-pass filter cutoff slightly downward to
-  // avoid some aliasing at the very high end.
- // TODO(crogers): this value is empirical and to be more exact should vary
- // depending on kKernelSize.
- sinc_scale_factor *= 0.9;
-
- // Generates a set of windowed sinc() kernels.
- // We generate a range of sub-sample offsets from 0.0 to 1.0.
- for (int offset_idx = 0; offset_idx <= kKernelOffsetCount; ++offset_idx) {
- double subsample_offset =
- static_cast<double>(offset_idx) / kKernelOffsetCount;
-
- for (int i = 0; i < kKernelSize; ++i) {
- // Compute the sinc with offset.
- double s =
- sinc_scale_factor * M_PI * (i - kKernelSize / 2 - subsample_offset);
- double sinc = (!s ? 1.0 : sin(s) / s) * sinc_scale_factor;
-
- // Compute Blackman window, matching the offset of the sinc().
- double x = (i - subsample_offset) / kKernelSize;
- double window = kA0 - kA1 * cos(2.0 * M_PI * x) + kA2
- * cos(4.0 * M_PI * x);
-
- // Window the sinc() function and store at the correct offset.
- kernel_storage_.get()[i + offset_idx * kKernelSize] = sinc * window;
- }
- }
-}
-
-void SincResampler::Resample(float* destination, int frames) {
- int remaining_frames = frames;
-
- // Step (1) -- Prime the input buffer at the start of the input stream.
- if (!buffer_primed_) {
- read_cb_.Run(r0_, kBlockSize + kKernelSize / 2);
- buffer_primed_ = true;
- }
-
- // Step (2) -- Resample!
- while (remaining_frames) {
- while (virtual_source_idx_ < kBlockSize) {
- // |virtual_source_idx_| lies in between two kernel offsets so figure out
- // what they are.
- int source_idx = static_cast<int>(virtual_source_idx_);
- double subsample_remainder = virtual_source_idx_ - source_idx;
-
- double virtual_offset_idx = subsample_remainder * kKernelOffsetCount;
- int offset_idx = static_cast<int>(virtual_offset_idx);
-
- // We'll compute "convolutions" for the two kernels which straddle
- // |virtual_source_idx_|.
- float* k1 = kernel_storage_.get() + offset_idx * kKernelSize;
- float* k2 = k1 + kKernelSize;
-
- // Initialize input pointer based on quantized |virtual_source_idx_|.
- float* input_ptr = r1_ + source_idx;
-
- // Figure out how much to weight each kernel's "convolution".
- double kernel_interpolation_factor = virtual_offset_idx - offset_idx;
- *destination++ = Convolve(
- input_ptr, k1, k2, kernel_interpolation_factor);
-
- // Advance the virtual index.
- virtual_source_idx_ += io_sample_rate_ratio_;
-
- if (!--remaining_frames)
- return;
- }
-
- // Wrap back around to the start.
- virtual_source_idx_ -= kBlockSize;
-
- // Step (3) Copy r3_ to r1_ and r4_ to r2_.
- // This wraps the last input frames back to the start of the buffer.
- memcpy(r1_, r3_, sizeof(*input_buffer_.get()) * (kKernelSize / 2));
- memcpy(r2_, r4_, sizeof(*input_buffer_.get()) * (kKernelSize / 2));
-
- // Step (4)
- // Refresh the buffer with more input.
- read_cb_.Run(r5_, kBlockSize);
- }
-}
-
-int SincResampler::ChunkSize() {
- return kBlockSize / io_sample_rate_ratio_;
-}
-
-void SincResampler::Flush() {
- virtual_source_idx_ = 0;
- buffer_primed_ = false;
- memset(input_buffer_.get(), 0, sizeof(*input_buffer_.get()) * kBufferSize);
-}
-
-float SincResampler::Convolve(const float* input_ptr, const float* k1,
- const float* k2,
- double kernel_interpolation_factor) {
- // Rely on function level static initialization to keep ConvolveProc selection
- // thread safe.
- typedef float (*ConvolveProc)(const float* src, const float* k1,
- const float* k2,
- double kernel_interpolation_factor);
-#if defined(ARCH_CPU_X86_FAMILY) && defined(__SSE__)
- static const ConvolveProc kConvolveProc =
- base::CPU().has_sse() ? Convolve_SSE : Convolve_C;
-#elif defined(ARCH_CPU_ARM_FAMILY) && defined(USE_NEON)
- static const ConvolveProc kConvolveProc = Convolve_NEON;
-#else
- static const ConvolveProc kConvolveProc = Convolve_C;
-#endif
-
- return kConvolveProc(input_ptr, k1, k2, kernel_interpolation_factor);
-}
-
-float SincResampler::Convolve_C(const float* input_ptr, const float* k1,
- const float* k2,
- double kernel_interpolation_factor) {
- float sum1 = 0;
- float sum2 = 0;
-
- // Generate a single output sample. Unrolling this loop hurt performance in
- // local testing.
- int n = kKernelSize;
- while (n--) {
- sum1 += *input_ptr * *k1++;
- sum2 += *input_ptr++ * *k2++;
- }
-
- // Linearly interpolate the two "convolutions".
- return (1.0 - kernel_interpolation_factor) * sum1
- + kernel_interpolation_factor * sum2;
-}
-
-#if defined(ARCH_CPU_X86_FAMILY) && defined(__SSE__)
-float SincResampler::Convolve_SSE(const float* input_ptr, const float* k1,
- const float* k2,
- double kernel_interpolation_factor) {
- // Ensure |k1|, |k2| are 16-byte aligned for SSE usage. Should always be true
- // so long as kKernelSize is a multiple of 16.
- DCHECK_EQ(0u, reinterpret_cast<uintptr_t>(k1) & 0x0F);
- DCHECK_EQ(0u, reinterpret_cast<uintptr_t>(k2) & 0x0F);
-
- __m128 m_input;
- __m128 m_sums1 = _mm_setzero_ps();
- __m128 m_sums2 = _mm_setzero_ps();
-
- // Based on |input_ptr| alignment, we need to use loadu or load. Unrolling
- // these loops hurt performance in local testing.
- if (reinterpret_cast<uintptr_t>(input_ptr) & 0x0F) {
- for (int i = 0; i < kKernelSize; i += 4) {
- m_input = _mm_loadu_ps(input_ptr + i);
- m_sums1 = _mm_add_ps(m_sums1, _mm_mul_ps(m_input, _mm_load_ps(k1 + i)));
- m_sums2 = _mm_add_ps(m_sums2, _mm_mul_ps(m_input, _mm_load_ps(k2 + i)));
- }
- } else {
- for (int i = 0; i < kKernelSize; i += 4) {
- m_input = _mm_load_ps(input_ptr + i);
- m_sums1 = _mm_add_ps(m_sums1, _mm_mul_ps(m_input, _mm_load_ps(k1 + i)));
- m_sums2 = _mm_add_ps(m_sums2, _mm_mul_ps(m_input, _mm_load_ps(k2 + i)));
- }
- }
-
- // Linearly interpolate the two "convolutions".
- m_sums1 = _mm_mul_ps(m_sums1, _mm_set_ps1(1.0 - kernel_interpolation_factor));
- m_sums2 = _mm_mul_ps(m_sums2, _mm_set_ps1(kernel_interpolation_factor));
- m_sums1 = _mm_add_ps(m_sums1, m_sums2);
-
- // Sum components together.
- float result;
- m_sums2 = _mm_add_ps(_mm_movehl_ps(m_sums1, m_sums1), m_sums1);
- _mm_store_ss(&result, _mm_add_ss(m_sums2, _mm_shuffle_ps(
- m_sums2, m_sums2, 1)));
-
- return result;
-}
-#endif
-
-#if defined(ARCH_CPU_ARM_FAMILY) && defined(USE_NEON)
-float SincResampler::Convolve_NEON(const float* input_ptr, const float* k1,
- const float* k2,
- double kernel_interpolation_factor) {
- float32x4_t m_input;
- float32x4_t m_sums1 = vmovq_n_f32(0);
- float32x4_t m_sums2 = vmovq_n_f32(0);
-
- const float* upper = input_ptr + kKernelSize;
- for (; input_ptr < upper; ) {
- m_input = vld1q_f32(input_ptr);
- input_ptr += 4;
- m_sums1 = vmlaq_f32(m_sums1, m_input, vld1q_f32(k1));
- k1 += 4;
- m_sums2 = vmlaq_f32(m_sums2, m_input, vld1q_f32(k2));
- k2 += 4;
- }
-
- // Linearly interpolate the two "convolutions".
- m_sums1 = vmlaq_f32(
- vmulq_f32(m_sums1, vmovq_n_f32(1.0 - kernel_interpolation_factor)),
- m_sums2, vmovq_n_f32(kernel_interpolation_factor));
-
- // Sum components together.
- float32x2_t m_half = vadd_f32(vget_high_f32(m_sums1), vget_low_f32(m_sums1));
- return vget_lane_f32(vpadd_f32(m_half, m_half), 0);
-}
-#endif
-
-} // namespace media
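The kernel construction in InitializeKernel() above (a sinc low-pass prototype shaped by a Blackman window, with one kernel per sub-sample offset) can be reproduced outside the Chromium tree in a few lines of standalone C++. The constants and window parameters below are copied from the deleted file; the function itself is only an illustrative sketch and is not part of the original sources.

#include <cmath>
#include <vector>

// Mirrors SincResampler::InitializeKernel(): one Blackman-windowed sinc kernel
// of kKernelSize taps per sub-sample offset in [0.0, 1.0].
std::vector<float> BuildWindowedSincKernels(double io_sample_rate_ratio) {
  const int kKernelSize = 32;         // Taps per kernel (from the deleted file).
  const int kKernelOffsetCount = 32;  // Sub-sample shifts (from the deleted file).

  // Blackman window parameters.
  const double kAlpha = 0.16;
  const double kA0 = 0.5 * (1.0 - kAlpha);
  const double kA1 = 0.5;
  const double kA2 = 0.5 * kAlpha;

  // Normalized cutoff, pulled in slightly to reduce aliasing near Nyquist
  // (see the comment in InitializeKernel() above).
  double sinc_scale_factor =
      io_sample_rate_ratio > 1.0 ? 1.0 / io_sample_rate_ratio : 1.0;
  sinc_scale_factor *= 0.9;

  std::vector<float> kernels(kKernelSize * (kKernelOffsetCount + 1));
  for (int offset_idx = 0; offset_idx <= kKernelOffsetCount; ++offset_idx) {
    const double subsample_offset =
        static_cast<double>(offset_idx) / kKernelOffsetCount;
    for (int i = 0; i < kKernelSize; ++i) {
      // Sinc with the sub-sample offset applied.
      const double s =
          sinc_scale_factor * M_PI * (i - kKernelSize / 2 - subsample_offset);
      const double sinc =
          (s == 0.0 ? 1.0 : std::sin(s) / s) * sinc_scale_factor;
      // Blackman window, matching the offset of the sinc.
      const double x = (i - subsample_offset) / kKernelSize;
      const double window =
          kA0 - kA1 * std::cos(2.0 * M_PI * x) + kA2 * std::cos(4.0 * M_PI * x);
      kernels[i + offset_idx * kKernelSize] = static_cast<float>(sinc * window);
    }
  }
  return kernels;
}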
diff --git a/src/media/base/sinc_resampler.h b/src/media/base/sinc_resampler.h
deleted file mode 100644
index a1d3cf7..0000000
--- a/src/media/base/sinc_resampler.h
+++ /dev/null
@@ -1,100 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef MEDIA_BASE_SINC_RESAMPLER_H_
-#define MEDIA_BASE_SINC_RESAMPLER_H_
-
-#include "base/callback.h"
-#include "base/gtest_prod_util.h"
-#include "base/memory/aligned_memory.h"
-#include "base/memory/scoped_ptr.h"
-#include "media/base/media_export.h"
-
-namespace media {
-
-// SincResampler is a high-quality single-channel sample-rate converter.
-class MEDIA_EXPORT SincResampler {
- public:
- // The maximum number of samples that may be requested from the callback ahead
- // of the current position in the stream.
- static const int kMaximumLookAheadSize;
-
- // Callback type for providing more data into the resampler. Expects |frames|
- // of data to be rendered into |destination|; zero padded if not enough frames
- // are available to satisfy the request.
- typedef base::Callback<void(float* destination, int frames)> ReadCB;
-
- // Constructs a SincResampler with the specified |read_cb|, which is used to
- // acquire audio data for resampling. |io_sample_rate_ratio| is the ratio of
- // input / output sample rates.
- SincResampler(double io_sample_rate_ratio, const ReadCB& read_cb);
- virtual ~SincResampler();
-
- // Resample |frames| of data from |read_cb_| into |destination|.
- void Resample(float* destination, int frames);
-
- // The maximum size in frames that guarantees Resample() will only make a
- // single call to |read_cb_| for more data.
- int ChunkSize();
-
- // Flush all buffered data and reset internal indices.
- void Flush();
-
- private:
- FRIEND_TEST_ALL_PREFIXES(SincResamplerTest, Convolve);
- FRIEND_TEST_ALL_PREFIXES(SincResamplerTest, ConvolveBenchmark);
-
- void InitializeKernel();
-
- // Compute convolution of |k1| and |k2| over |input_ptr|, resultant sums are
- // linearly interpolated using |kernel_interpolation_factor|. On x86, the
- // underlying implementation is chosen at run time based on SSE support. On
- // ARM, NEON support is chosen at compile time based on compilation flags.
- static float Convolve(const float* input_ptr, const float* k1,
- const float* k2, double kernel_interpolation_factor);
- static float Convolve_C(const float* input_ptr, const float* k1,
- const float* k2, double kernel_interpolation_factor);
- static float Convolve_SSE(const float* input_ptr, const float* k1,
- const float* k2,
- double kernel_interpolation_factor);
- static float Convolve_NEON(const float* input_ptr, const float* k1,
- const float* k2,
- double kernel_interpolation_factor);
-
- // The ratio of input / output sample rates.
- double io_sample_rate_ratio_;
-
- // An index on the source input buffer with sub-sample precision. It must be
- // double precision to avoid drift.
- double virtual_source_idx_;
-
- // The buffer is primed once at the very beginning of processing.
- bool buffer_primed_;
-
- // Source of data for resampling.
- ReadCB read_cb_;
-
-  // Contains kKernelOffsetCount + 1 kernels back-to-back, each of size
-  // kKernelSize. The kernel offsets are sub-sample shifts of a windowed sinc,
-  // shifted from 0.0 to 1.0 sample.
- scoped_ptr_malloc<float, base::ScopedPtrAlignedFree> kernel_storage_;
-
- // Data from the source is copied into this buffer for each processing pass.
- scoped_ptr_malloc<float, base::ScopedPtrAlignedFree> input_buffer_;
-
- // Pointers to the various regions inside |input_buffer_|. See the diagram at
- // the top of the .cc file for more information.
- float* const r0_;
- float* const r1_;
- float* const r2_;
- float* const r3_;
- float* const r4_;
- float* const r5_;
-
- DISALLOW_COPY_AND_ASSIGN(SincResampler);
-};
-
-} // namespace media
-
-#endif // MEDIA_BASE_SINC_RESAMPLER_H_
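As a usage note for the interface above: a caller owns a source object exposing a ProvideInput-style method, binds it into a ReadCB, and then pulls resampled frames out with Resample(). A minimal sketch follows, mirroring the base::Bind / base::Unretained idiom used in the unit tests below; MySource and ResampleExample are hypothetical names, not code from the tree.

#include "base/bind.h"
#include "media/base/sinc_resampler.h"

// Hypothetical source that fills |destination| with |frames| input samples.
class MySource {
 public:
  void ProvideInput(float* destination, int frames) {
    for (int i = 0; i < frames; ++i)
      destination[i] = 0.0f;  // Real code would render audio here.
  }
};

void ResampleExample(float* output, int output_frames) {
  MySource source;
  // 48 kHz input resampled to 44.1 kHz output.
  media::SincResampler resampler(
      48000.0 / 44100.0,
      base::Bind(&MySource::ProvideInput, base::Unretained(&source)));
  // Requesting at most ChunkSize() frames guarantees a single read callback.
  resampler.Resample(output, output_frames);
  resampler.Flush();  // Reset internal state before seeking or reusing.
}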
diff --git a/src/media/base/sinc_resampler_unittest.cc b/src/media/base/sinc_resampler_unittest.cc
deleted file mode 100644
index 59a9f81..0000000
--- a/src/media/base/sinc_resampler_unittest.cc
+++ /dev/null
@@ -1,405 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// MSVC++ requires this to be set before any other includes to get M_PI.
-#define _USE_MATH_DEFINES
-
-#include <cmath>
-
-#include "base/bind.h"
-#include "base/bind_helpers.h"
-#include "base/command_line.h"
-#include "base/logging.h"
-#include "base/string_number_conversions.h"
-#include "base/stringize_macros.h"
-#include "base/time.h"
-#include "build/build_config.h"
-#include "media/base/sinc_resampler.h"
-#include "testing/gmock/include/gmock/gmock.h"
-#include "testing/gtest/include/gtest/gtest.h"
-
-using testing::_;
-
-namespace media {
-
-static const double kSampleRateRatio = 192000.0 / 44100.0;
-static const double kKernelInterpolationFactor = 0.5;
-
-// Command line switch for runtime adjustment of ConvolveBenchmark iterations.
-static const char kConvolveIterations[] = "convolve-iterations";
-
-// Helper class to ensure ChunkedResample() functions properly.
-class MockSource {
- public:
- MOCK_METHOD2(ProvideInput, void(float* destination, int frames));
-};
-
-ACTION(ClearBuffer) {
- memset(arg0, 0, arg1 * sizeof(float));
-}
-
-ACTION(FillBuffer) {
- // Value chosen arbitrarily such that SincResampler resamples it to something
- // easily representable on all platforms; e.g., using kSampleRateRatio this
- // becomes 1.81219.
- memset(arg0, 64, arg1 * sizeof(float));
-}
-
-// Test requesting multiples of ChunkSize() frames results in the proper number
-// of callbacks.
-TEST(SincResamplerTest, ChunkedResample) {
- MockSource mock_source;
-
- // Choose a high ratio of input to output samples which will result in quick
- // exhaustion of SincResampler's internal buffers.
- SincResampler resampler(
- kSampleRateRatio,
- base::Bind(&MockSource::ProvideInput, base::Unretained(&mock_source)));
-
- static const int kChunks = 2;
- int max_chunk_size = resampler.ChunkSize() * kChunks;
- scoped_array<float> resampled_destination(new float[max_chunk_size]);
-
- // Verify requesting ChunkSize() frames causes a single callback.
- EXPECT_CALL(mock_source, ProvideInput(_, _))
- .Times(1).WillOnce(ClearBuffer());
- resampler.Resample(resampled_destination.get(), resampler.ChunkSize());
-
- // Verify requesting kChunks * ChunkSize() frames causes kChunks callbacks.
- testing::Mock::VerifyAndClear(&mock_source);
- EXPECT_CALL(mock_source, ProvideInput(_, _))
- .Times(kChunks).WillRepeatedly(ClearBuffer());
- resampler.Resample(resampled_destination.get(), max_chunk_size);
-}
-
-// Test flush resets the internal state properly.
-TEST(SincResamplerTest, Flush) {
- MockSource mock_source;
- SincResampler resampler(
- kSampleRateRatio,
- base::Bind(&MockSource::ProvideInput, base::Unretained(&mock_source)));
- scoped_array<float> resampled_destination(new float[resampler.ChunkSize()]);
-
- // Fill the resampler with junk data.
- EXPECT_CALL(mock_source, ProvideInput(_, _))
- .Times(1).WillOnce(FillBuffer());
- resampler.Resample(resampled_destination.get(), resampler.ChunkSize() / 2);
- ASSERT_NE(resampled_destination[0], 0);
-
- // Flush and request more data, which should all be zeros now.
- resampler.Flush();
- testing::Mock::VerifyAndClear(&mock_source);
- EXPECT_CALL(mock_source, ProvideInput(_, _))
- .Times(1).WillOnce(ClearBuffer());
- resampler.Resample(resampled_destination.get(), resampler.ChunkSize() / 2);
- for (int i = 0; i < resampler.ChunkSize() / 2; ++i)
- ASSERT_FLOAT_EQ(resampled_destination[i], 0);
-}
-
-// Define platform independent function name for Convolve* tests.
-#if defined(ARCH_CPU_X86_FAMILY) && defined(__SSE__)
-#define CONVOLVE_FUNC Convolve_SSE
-#elif defined(ARCH_CPU_ARM_FAMILY) && defined(USE_NEON)
-#define CONVOLVE_FUNC Convolve_NEON
-#endif
-
-// Ensure various optimized Convolve() methods return the same value. Only run
-// this test if other optimized methods exist, otherwise the default Convolve()
-// will be tested by the parameterized SincResampler tests below.
-#if defined(CONVOLVE_FUNC)
-TEST(SincResamplerTest, Convolve) {
- // Initialize a dummy resampler.
- MockSource mock_source;
- SincResampler resampler(
- kSampleRateRatio,
- base::Bind(&MockSource::ProvideInput, base::Unretained(&mock_source)));
-
- // The optimized Convolve methods are slightly more precise than Convolve_C(),
- // so comparison must be done using an epsilon.
- static const double kEpsilon = 0.00000005;
-
- // Use a kernel from SincResampler as input and kernel data, this has the
- // benefit of already being properly sized and aligned for Convolve_SSE().
- double result = resampler.Convolve_C(
- resampler.kernel_storage_.get(), resampler.kernel_storage_.get(),
- resampler.kernel_storage_.get(), kKernelInterpolationFactor);
- double result2 = resampler.CONVOLVE_FUNC(
- resampler.kernel_storage_.get(), resampler.kernel_storage_.get(),
- resampler.kernel_storage_.get(), kKernelInterpolationFactor);
- EXPECT_NEAR(result2, result, kEpsilon);
-
- // Test Convolve() w/ unaligned input pointer.
- result = resampler.Convolve_C(
- resampler.kernel_storage_.get() + 1, resampler.kernel_storage_.get(),
- resampler.kernel_storage_.get(), kKernelInterpolationFactor);
- result2 = resampler.CONVOLVE_FUNC(
- resampler.kernel_storage_.get() + 1, resampler.kernel_storage_.get(),
- resampler.kernel_storage_.get(), kKernelInterpolationFactor);
- EXPECT_NEAR(result2, result, kEpsilon);
-}
-#endif
-
-// Benchmark for the various Convolve() methods. Make sure to build with
-// branding=Chrome so that DCHECKs are compiled out when benchmarking. Original
-// benchmarks were run with --convolve-iterations=50000000.
-TEST(SincResamplerTest, ConvolveBenchmark) {
- // Initialize a dummy resampler.
- MockSource mock_source;
- SincResampler resampler(
- kSampleRateRatio,
- base::Bind(&MockSource::ProvideInput, base::Unretained(&mock_source)));
-
- // Retrieve benchmark iterations from command line.
- int convolve_iterations = 10;
- std::string iterations(CommandLine::ForCurrentProcess()->GetSwitchValueASCII(
- kConvolveIterations));
- if (!iterations.empty())
- base::StringToInt(iterations, &convolve_iterations);
-
- printf("Benchmarking %d iterations:\n", convolve_iterations);
-
- // Benchmark Convolve_C().
- base::TimeTicks start = base::TimeTicks::HighResNow();
- for (int i = 0; i < convolve_iterations; ++i) {
- resampler.Convolve_C(
- resampler.kernel_storage_.get(), resampler.kernel_storage_.get(),
- resampler.kernel_storage_.get(), kKernelInterpolationFactor);
- }
- double total_time_c_ms =
- (base::TimeTicks::HighResNow() - start).InMillisecondsF();
- printf("Convolve_C took %.2fms.\n", total_time_c_ms);
-
-#if defined(CONVOLVE_FUNC)
- // Benchmark with unaligned input pointer.
- start = base::TimeTicks::HighResNow();
- for (int j = 0; j < convolve_iterations; ++j) {
- resampler.CONVOLVE_FUNC(
- resampler.kernel_storage_.get() + 1, resampler.kernel_storage_.get(),
- resampler.kernel_storage_.get(), kKernelInterpolationFactor);
- }
- double total_time_optimized_unaligned_ms =
- (base::TimeTicks::HighResNow() - start).InMillisecondsF();
- printf(STRINGIZE(CONVOLVE_FUNC) "(unaligned) took %.2fms; which is %.2fx "
- "faster than Convolve_C.\n", total_time_optimized_unaligned_ms,
- total_time_c_ms / total_time_optimized_unaligned_ms);
-
- // Benchmark with aligned input pointer.
- start = base::TimeTicks::HighResNow();
- for (int j = 0; j < convolve_iterations; ++j) {
- resampler.CONVOLVE_FUNC(
- resampler.kernel_storage_.get(), resampler.kernel_storage_.get(),
- resampler.kernel_storage_.get(), kKernelInterpolationFactor);
- }
- double total_time_optimized_aligned_ms =
- (base::TimeTicks::HighResNow() - start).InMillisecondsF();
- printf(STRINGIZE(CONVOLVE_FUNC) " (aligned) took %.2fms; which is %.2fx "
- "faster than Convolve_C and %.2fx faster than "
- STRINGIZE(CONVOLVE_FUNC) " (unaligned).\n",
- total_time_optimized_aligned_ms,
- total_time_c_ms / total_time_optimized_aligned_ms,
- total_time_optimized_unaligned_ms / total_time_optimized_aligned_ms);
-#endif
-}
-
-#undef CONVOLVE_FUNC
-
-// Fake audio source for testing the resampler. Generates a sinusoidal linear
-// chirp (http://en.wikipedia.org/wiki/Chirp) which can be tuned to stress the
-// resampler for the specific sample rate conversion being used.
-class SinusoidalLinearChirpSource {
- public:
- SinusoidalLinearChirpSource(int sample_rate, int samples,
- double max_frequency)
- : sample_rate_(sample_rate),
- total_samples_(samples),
- max_frequency_(max_frequency),
- current_index_(0) {
- // Chirp rate.
- double duration = static_cast<double>(total_samples_) / sample_rate_;
- k_ = (max_frequency_ - kMinFrequency) / duration;
- }
-
- virtual ~SinusoidalLinearChirpSource() {}
-
- void ProvideInput(float* destination, int frames) {
- for (int i = 0; i < frames; ++i, ++current_index_) {
- // Filter out frequencies higher than Nyquist.
- if (Frequency(current_index_) > 0.5 * sample_rate_) {
- destination[i] = 0;
- } else {
- // Calculate time in seconds.
- double t = static_cast<double>(current_index_) / sample_rate_;
-
- // Sinusoidal linear chirp.
- destination[i] = sin(2 * M_PI * (kMinFrequency * t + (k_ / 2) * t * t));
- }
- }
- }
-
- double Frequency(int position) {
- return kMinFrequency + position * (max_frequency_ - kMinFrequency)
- / total_samples_;
- }
-
- private:
- enum {
- kMinFrequency = 5
- };
-
- double sample_rate_;
- int total_samples_;
- double max_frequency_;
- double k_;
- int current_index_;
-
- DISALLOW_COPY_AND_ASSIGN(SinusoidalLinearChirpSource);
-};
-
-typedef std::tr1::tuple<int, int, double, double> SincResamplerTestData;
-class SincResamplerTest
- : public testing::TestWithParam<SincResamplerTestData> {
- public:
- SincResamplerTest()
- : input_rate_(std::tr1::get<0>(GetParam())),
- output_rate_(std::tr1::get<1>(GetParam())),
- rms_error_(std::tr1::get<2>(GetParam())),
- low_freq_error_(std::tr1::get<3>(GetParam())) {
- }
-
- virtual ~SincResamplerTest() {}
-
- protected:
- int input_rate_;
- int output_rate_;
- double rms_error_;
- double low_freq_error_;
-};
-
-// Tests resampling using a given input and output sample rate.
-TEST_P(SincResamplerTest, Resample) {
- // Make comparisons using one second of data.
- static const double kTestDurationSecs = 1;
- int input_samples = kTestDurationSecs * input_rate_;
- int output_samples = kTestDurationSecs * output_rate_;
-
- // Nyquist frequency for the input sampling rate.
- double input_nyquist_freq = 0.5 * input_rate_;
-
- // Source for data to be resampled.
- SinusoidalLinearChirpSource resampler_source(
- input_rate_, input_samples, input_nyquist_freq);
-
- SincResampler resampler(
- input_rate_ / static_cast<double>(output_rate_),
- base::Bind(&SinusoidalLinearChirpSource::ProvideInput,
- base::Unretained(&resampler_source)));
-
- // TODO(dalecurtis): If we switch to AVX/SSE optimization, we'll need to
- // allocate these on 32-byte boundaries and ensure they're sized % 32 bytes.
- scoped_array<float> resampled_destination(new float[output_samples]);
- scoped_array<float> pure_destination(new float[output_samples]);
-
- // Generate resampled signal.
- resampler.Resample(resampled_destination.get(), output_samples);
-
- // Generate pure signal.
- SinusoidalLinearChirpSource pure_source(
- output_rate_, output_samples, input_nyquist_freq);
- pure_source.ProvideInput(pure_destination.get(), output_samples);
-
-  // Fractions of the Nyquist frequency (0.5 * min(input_rate_, output_rate_))
-  // that we refer to as the low and high ranges.
- static const double kLowFrequencyNyquistRange = 0.7;
- static const double kHighFrequencyNyquistRange = 0.9;
-
- // Calculate Root-Mean-Square-Error and maximum error for the resampling.
- double sum_of_squares = 0;
- double low_freq_max_error = 0;
- double high_freq_max_error = 0;
- int minimum_rate = std::min(input_rate_, output_rate_);
- double low_frequency_range = kLowFrequencyNyquistRange * 0.5 * minimum_rate;
- double high_frequency_range = kHighFrequencyNyquistRange * 0.5 * minimum_rate;
- for (int i = 0; i < output_samples; ++i) {
- double error = fabs(resampled_destination[i] - pure_destination[i]);
-
- if (pure_source.Frequency(i) < low_frequency_range) {
- if (error > low_freq_max_error)
- low_freq_max_error = error;
- } else if (pure_source.Frequency(i) < high_frequency_range) {
- if (error > high_freq_max_error)
- high_freq_max_error = error;
- }
- // TODO(dalecurtis): Sanity check frequencies > kHighFrequencyNyquistRange.
-
- sum_of_squares += error * error;
- }
-
- double rms_error = sqrt(sum_of_squares / output_samples);
-
- // Convert each error to dbFS.
- #define DBFS(x) 20 * log10(x)
- rms_error = DBFS(rms_error);
- low_freq_max_error = DBFS(low_freq_max_error);
- high_freq_max_error = DBFS(high_freq_max_error);
-
- EXPECT_LE(rms_error, rms_error_);
- EXPECT_LE(low_freq_max_error, low_freq_error_);
-
- // All conversions currently have a high frequency error around -6 dbFS.
- static const double kHighFrequencyMaxError = -6.02;
- EXPECT_LE(high_freq_max_error, kHighFrequencyMaxError);
-}
-
-// Almost all conversions have an RMS error of around -14 dbFS.
-static const double kResamplingRMSError = -14.58;
-
-// Thresholds chosen arbitrarily based on what each resampling reported during
-// testing. All thresholds are in dbFS, http://en.wikipedia.org/wiki/DBFS.
-INSTANTIATE_TEST_CASE_P(
- SincResamplerTest, SincResamplerTest, testing::Values(
- // To 44.1kHz
- std::tr1::make_tuple(8000, 44100, kResamplingRMSError, -62.73),
- std::tr1::make_tuple(11025, 44100, kResamplingRMSError, -72.19),
- std::tr1::make_tuple(16000, 44100, kResamplingRMSError, -62.54),
- std::tr1::make_tuple(22050, 44100, kResamplingRMSError, -73.53),
- std::tr1::make_tuple(32000, 44100, kResamplingRMSError, -63.32),
- std::tr1::make_tuple(44100, 44100, kResamplingRMSError, -73.53),
- std::tr1::make_tuple(48000, 44100, -15.01, -64.04),
- std::tr1::make_tuple(96000, 44100, -18.49, -25.51),
- std::tr1::make_tuple(192000, 44100, -20.50, -13.31),
-
- // To 48kHz
- std::tr1::make_tuple(8000, 48000, kResamplingRMSError, -63.43),
- std::tr1::make_tuple(11025, 48000, kResamplingRMSError, -62.61),
- std::tr1::make_tuple(16000, 48000, kResamplingRMSError, -63.96),
- std::tr1::make_tuple(22050, 48000, kResamplingRMSError, -62.42),
- std::tr1::make_tuple(32000, 48000, kResamplingRMSError, -64.04),
- std::tr1::make_tuple(44100, 48000, kResamplingRMSError, -62.63),
- std::tr1::make_tuple(48000, 48000, kResamplingRMSError, -73.52),
- std::tr1::make_tuple(96000, 48000, -18.40, -28.44),
- std::tr1::make_tuple(192000, 48000, -20.43, -14.11),
-
- // To 96kHz
- std::tr1::make_tuple(8000, 96000, kResamplingRMSError, -63.19),
- std::tr1::make_tuple(11025, 96000, kResamplingRMSError, -62.61),
- std::tr1::make_tuple(16000, 96000, kResamplingRMSError, -63.39),
- std::tr1::make_tuple(22050, 96000, kResamplingRMSError, -62.42),
- std::tr1::make_tuple(32000, 96000, kResamplingRMSError, -63.95),
- std::tr1::make_tuple(44100, 96000, kResamplingRMSError, -62.63),
- std::tr1::make_tuple(48000, 96000, kResamplingRMSError, -73.52),
- std::tr1::make_tuple(96000, 96000, kResamplingRMSError, -73.52),
- std::tr1::make_tuple(192000, 96000, kResamplingRMSError, -28.41),
-
- // To 192kHz
- std::tr1::make_tuple(8000, 192000, kResamplingRMSError, -63.10),
- std::tr1::make_tuple(11025, 192000, kResamplingRMSError, -62.61),
- std::tr1::make_tuple(16000, 192000, kResamplingRMSError, -63.14),
- std::tr1::make_tuple(22050, 192000, kResamplingRMSError, -62.42),
- std::tr1::make_tuple(32000, 192000, kResamplingRMSError, -63.38),
- std::tr1::make_tuple(44100, 192000, kResamplingRMSError, -62.63),
- std::tr1::make_tuple(48000, 192000, kResamplingRMSError, -73.44),
- std::tr1::make_tuple(96000, 192000, kResamplingRMSError, -73.52),
- std::tr1::make_tuple(192000, 192000, kResamplingRMSError, -73.52)));
-
-} // namespace media
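A quick sanity check on the chirp source deleted above: the instantaneous frequency of the signal generated in ProvideInput() is the derivative of its phase, which is exactly what Frequency() returns. In the notation below (symbols chosen here, not taken from the file),

\[
\phi(t) = 2\pi\Big(f_{\min}\,t + \tfrac{k}{2}\,t^2\Big), \qquad
f(t) = \frac{1}{2\pi}\,\frac{d\phi}{dt} = f_{\min} + k\,t,
\]

and with \(k = (f_{\max} - f_{\min})/T\), \(t = n/f_s\), and \(T = N/f_s\), this reduces to \(f(n) = f_{\min} + n\,(f_{\max} - f_{\min})/N\), matching Frequency(position).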
diff --git a/src/media/base/stream_parser.cc b/src/media/base/stream_parser.cc
deleted file mode 100644
index 1240919..0000000
--- a/src/media/base/stream_parser.cc
+++ /dev/null
@@ -1,13 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "media/base/stream_parser.h"
-
-namespace media {
-
-StreamParser::StreamParser() {}
-
-StreamParser::~StreamParser() {}
-
-} // namespace media
diff --git a/src/media/base/stream_parser.h b/src/media/base/stream_parser.h
deleted file mode 100644
index 5c5e6e6..0000000
--- a/src/media/base/stream_parser.h
+++ /dev/null
@@ -1,102 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef MEDIA_BASE_STREAM_PARSER_H_
-#define MEDIA_BASE_STREAM_PARSER_H_
-
-#include <deque>
-#include <string>
-
-#include "base/callback_forward.h"
-#include "base/memory/ref_counted.h"
-#include "base/memory/scoped_ptr.h"
-#include "base/time.h"
-#include "media/base/media_export.h"
-#include "media/base/media_log.h"
-
-namespace media {
-
-class AudioDecoderConfig;
-class StreamParserBuffer;
-class VideoDecoderConfig;
-
-// Abstract interface for parsing media byte streams.
-class MEDIA_EXPORT StreamParser {
- public:
- typedef std::deque<scoped_refptr<StreamParserBuffer> > BufferQueue;
-
- StreamParser();
- virtual ~StreamParser();
-
- // Indicates completion of parser initialization.
- // First parameter - Indicates initialization success. Set to true if
- // initialization was successful. False if an error
- // occurred.
- // Second parameter - Indicates the stream duration. Only contains a valid
- // value if the first parameter is true.
- typedef base::Callback<void(bool, base::TimeDelta)> InitCB;
-
- // Indicates when new stream configurations have been parsed.
- // First parameter - The new audio configuration. If the config is not valid
- // then it means that there isn't an audio stream.
- // Second parameter - The new video configuration. If the config is not valid
-  //                    then it means that there isn't a video stream.
- // Return value - True if the new configurations are accepted.
- // False if the new configurations are not supported
- // and indicates that a parsing error should be signalled.
- typedef base::Callback<bool(const AudioDecoderConfig&,
- const VideoDecoderConfig&)> NewConfigCB;
-
- // New stream buffers have been parsed.
- // First parameter - A queue of newly parsed buffers.
- // Return value - True indicates that the buffers are accepted.
- // False if something was wrong with the buffers and a parsing
- // error should be signalled.
- typedef base::Callback<bool(const BufferQueue&)> NewBuffersCB;
-
- // Signals the beginning of a new media segment.
- // First parameter - The earliest timestamp of all the streams in the segment.
- typedef base::Callback<void(base::TimeDelta)> NewMediaSegmentCB;
-
- // A new potentially encrypted stream has been parsed.
- // First parameter - The type of the initialization data associated with the
- // stream.
- // Second parameter - The initialization data associated with the stream.
- // Third parameter - Number of bytes of the initialization data.
- // Return value - True indicates that the initialization data is accepted.
- // False if something was wrong with the initialization data
- // and a parsing error should be signalled.
- typedef base::Callback<bool(const std::string&,
- scoped_array<uint8>, int)> NeedKeyCB;
-
- // Initialize the parser with necessary callbacks. Must be called before any
- // data is passed to Parse(). |init_cb| will be called once enough data has
- // been parsed to determine the initial stream configurations, presentation
- // start time, and duration.
- virtual void Init(const InitCB& init_cb,
- const NewConfigCB& config_cb,
- const NewBuffersCB& audio_cb,
- const NewBuffersCB& video_cb,
- const NeedKeyCB& need_key_cb,
- const NewMediaSegmentCB& new_segment_cb,
- const base::Closure& end_of_segment_cb,
- const LogCB& log_cb) = 0;
-
- // Called when a seek occurs. This flushes the current parser state
- // and puts the parser in a state where it can receive data for the new seek
- // point.
- virtual void Flush() = 0;
-
- // Called when there is new data to parse.
- //
- // Returns true if the parse succeeds.
- virtual bool Parse(const uint8* buf, int size) = 0;
-
- private:
- DISALLOW_COPY_AND_ASSIGN(StreamParser);
-};
-
-} // namespace media
-
-#endif // MEDIA_BASE_STREAM_PARSER_H_
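The interface above is implemented by the container-specific parsers elsewhere in media/. As an illustration of the contract (store the callbacks in Init(), report configs and buffers from Parse(), reset state in Flush()), here is a skeletal, purely hypothetical subclass; none of these names exist in the tree.

#include "media/base/stream_parser.h"

namespace media {

// Hypothetical parser skeleton illustrating the StreamParser contract.
class NullStreamParser : public StreamParser {
 public:
  NullStreamParser() {}
  virtual ~NullStreamParser() {}

  virtual void Init(const InitCB& init_cb,
                    const NewConfigCB& config_cb,
                    const NewBuffersCB& audio_cb,
                    const NewBuffersCB& video_cb,
                    const NeedKeyCB& need_key_cb,
                    const NewMediaSegmentCB& new_segment_cb,
                    const base::Closure& end_of_segment_cb,
                    const LogCB& log_cb) {
    init_cb_ = init_cb;  // A real parser keeps all of the callbacks for later.
  }

  virtual void Flush() {
    // A real parser drops buffered bytes here and waits for the next seek
    // point's data.
  }

  virtual bool Parse(const uint8* buf, int size) {
    // A real parser consumes |buf|, emits configs via |config_cb| and buffers
    // via |audio_cb| / |video_cb|, and returns false on malformed data.
    return true;
  }

 private:
  InitCB init_cb_;
};

}  // namespace media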
diff --git a/src/media/base/stream_parser_buffer.cc b/src/media/base/stream_parser_buffer.cc
deleted file mode 100644
index 6a6c4a5..0000000
--- a/src/media/base/stream_parser_buffer.cc
+++ /dev/null
@@ -1,87 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "media/base/stream_parser_buffer.h"
-
-#include "base/logging.h"
-#include "media/base/shell_buffer_factory.h"
-
-namespace media {
-
-#if defined(__LB_SHELL__) || defined(COBALT)
-scoped_refptr<StreamParserBuffer> StreamParserBuffer::CreateEOSBuffer() {
- return make_scoped_refptr(new StreamParserBuffer(NULL, 0, false));
-}
-
-scoped_refptr<StreamParserBuffer> StreamParserBuffer::CopyFrom(
- const uint8* data,
- int data_size,
- bool is_keyframe) {
- if (!data || data_size == 0) {
- return CreateEOSBuffer();
- }
- DCHECK(data);
- DCHECK_GT(data_size, 0);
-
- uint8* buffer_bytes = ShellBufferFactory::Instance()->AllocateNow(data_size);
- if (!buffer_bytes) {
- return NULL;
- }
-  // Copy the data over to the reusable buffer area.
- memcpy(buffer_bytes, data, data_size);
- return make_scoped_refptr(
- new StreamParserBuffer(buffer_bytes,
- data_size,
- is_keyframe));
-}
-#else
-scoped_refptr<StreamParserBuffer> StreamParserBuffer::CopyFrom(
- const uint8* data, int data_size, bool is_keyframe) {
- return make_scoped_refptr(
- new StreamParserBuffer(data, data_size, is_keyframe));
-}
-#endif
-
-base::TimeDelta StreamParserBuffer::GetDecodeTimestamp() const {
- if (decode_timestamp_ == kNoTimestamp())
- return GetTimestamp();
- return decode_timestamp_;
-}
-
-void StreamParserBuffer::SetDecodeTimestamp(const base::TimeDelta& timestamp) {
- decode_timestamp_ = timestamp;
-}
-
-#if defined(__LB_SHELL__) || defined(COBALT)
-StreamParserBuffer::StreamParserBuffer(uint8* data,
- int data_size,
- bool is_keyframe)
- : DecoderBuffer(data, data_size, is_keyframe),
- decode_timestamp_(kNoTimestamp()),
- config_id_(kInvalidConfigId) {
- SetDuration(kNoTimestamp());
-}
-#else
-StreamParserBuffer::StreamParserBuffer(const uint8* data,
- int data_size,
- bool is_keyframe)
- : DecoderBuffer(data, data_size, is_keyframe),
- decode_timestamp_(kNoTimestamp()),
- config_id_(kInvalidConfigId) {
- SetDuration(kNoTimestamp());
-}
-#endif
-
-StreamParserBuffer::~StreamParserBuffer() {
-}
-
-int StreamParserBuffer::GetConfigId() const {
- return config_id_;
-}
-
-void StreamParserBuffer::SetConfigId(int config_id) {
- config_id_ = config_id;
-}
-
-} // namespace media
diff --git a/src/media/base/stream_parser_buffer.h b/src/media/base/stream_parser_buffer.h
deleted file mode 100644
index a5471d9..0000000
--- a/src/media/base/stream_parser_buffer.h
+++ /dev/null
@@ -1,47 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef MEDIA_BASE_STREAM_PARSER_BUFFER_H_
-#define MEDIA_BASE_STREAM_PARSER_BUFFER_H_
-
-#include "media/base/decoder_buffer.h"
-#include "media/base/media_export.h"
-
-namespace media {
-
-class MEDIA_EXPORT StreamParserBuffer : public DecoderBuffer {
- public:
- // Value used to signal an invalid decoder config ID.
- enum { kInvalidConfigId = -1 };
-
- static scoped_refptr<StreamParserBuffer> CreateEOSBuffer();
- static scoped_refptr<StreamParserBuffer> CopyFrom(
- const uint8* data, int data_size, bool is_keyframe);
-
- // Decode timestamp. If not explicitly set, or set to kNoTimestamp(), the
- // value will be taken from the normal timestamp.
- base::TimeDelta GetDecodeTimestamp() const;
- void SetDecodeTimestamp(const base::TimeDelta& timestamp);
-
- // Gets/sets the ID of the decoder config associated with this
- // buffer.
- int GetConfigId() const;
- void SetConfigId(int config_id);
-
- private:
-#if defined(__LB_SHELL__) || defined(COBALT)
- StreamParserBuffer(uint8* data, int data_size, bool is_keyframe);
-#else
- StreamParserBuffer(const uint8* data, int data_size, bool is_keyframe);
-#endif
- virtual ~StreamParserBuffer();
-
- base::TimeDelta decode_timestamp_;
- int config_id_;
- DISALLOW_COPY_AND_ASSIGN(StreamParserBuffer);
-};
-
-} // namespace media
-
-#endif // MEDIA_BASE_STREAM_PARSER_BUFFER_H_
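A brief usage note for the buffer type above: parsers create buffers with CopyFrom() (or CreateEOSBuffer() for end-of-stream), then attach the decode timestamp and decoder-config id before handing them to the demuxer. The helper below is a hedged sketch; its name and the config id value are made up.

#include "media/base/stream_parser_buffer.h"

namespace media {

scoped_refptr<StreamParserBuffer> MakeExampleBuffer(const uint8* data,
                                                    int size,
                                                    base::TimeDelta dts) {
  // CopyFrom() copies |data| into a new keyframe buffer; in the Cobalt build
  // shown above, passing NULL / 0 yields an end-of-stream buffer instead.
  scoped_refptr<StreamParserBuffer> buffer =
      StreamParserBuffer::CopyFrom(data, size, true /* is_keyframe */);
  buffer->SetDecodeTimestamp(dts);  // Otherwise GetDecodeTimestamp() == PTS.
  buffer->SetConfigId(0);           // Which decoder config this buffer uses.
  return buffer;
}

}  // namespace media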
diff --git a/src/media/base/test_data_util.cc b/src/media/base/test_data_util.cc
deleted file mode 100644
index 134655d..0000000
--- a/src/media/base/test_data_util.cc
+++ /dev/null
@@ -1,48 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "media/base/test_data_util.h"
-
-#include "base/file_util.h"
-#include "base/logging.h"
-#include "base/path_service.h"
-#include "media/base/decoder_buffer.h"
-
-namespace media {
-
-FilePath GetTestDataFilePath(const std::string& name) {
- FilePath file_path;
- CHECK(PathService::Get(base::DIR_SOURCE_ROOT, &file_path));
-
- file_path = file_path.Append(FILE_PATH_LITERAL("media"))
- .Append(FILE_PATH_LITERAL("test"))
- .Append(FILE_PATH_LITERAL("data"))
- .AppendASCII(name);
- return file_path;
-}
-
-scoped_refptr<DecoderBuffer> ReadTestDataFile(const std::string& name) {
- FilePath file_path;
- CHECK(PathService::Get(base::DIR_SOURCE_ROOT, &file_path));
-
- file_path = file_path.Append(FILE_PATH_LITERAL("media"))
- .Append(FILE_PATH_LITERAL("test"))
- .Append(FILE_PATH_LITERAL("data"))
- .AppendASCII(name);
-
- int64 tmp = 0;
- CHECK(file_util::GetFileSize(file_path, &tmp))
- << "Failed to get file size for '" << name << "'";
-
- int file_size = static_cast<int>(tmp);
-
- scoped_refptr<DecoderBuffer> buffer(new DecoderBuffer(file_size));
- CHECK_EQ(file_size, file_util::ReadFile(
- file_path, reinterpret_cast<char*>(buffer->GetWritableData()), file_size))
- << "Failed to read '" << name << "'";
-
- return buffer;
-}
-
-} // namespace media
diff --git a/src/media/base/test_data_util.h b/src/media/base/test_data_util.h
deleted file mode 100644
index 062bbda..0000000
--- a/src/media/base/test_data_util.h
+++ /dev/null
@@ -1,32 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef MEDIA_BASE_TEST_DATA_UTIL_H_
-#define MEDIA_BASE_TEST_DATA_UTIL_H_
-
-#include <string>
-
-#include "base/basictypes.h"
-#include "base/file_path.h"
-#include "base/memory/ref_counted.h"
-#include "base/memory/scoped_ptr.h"
-
-namespace media {
-
-class DecoderBuffer;
-
-// Returns a file path for a file in the media/test/data directory.
-FilePath GetTestDataFilePath(const std::string& name);
-
-// Reads a test file from the media/test/data directory and stores it in a
-// DecoderBuffer. DecoderBuffer is used instead of DataBuffer to ensure that,
-// no matter what a test does, it's safe to use FFmpeg methods.
-//
-// |name| - The name of the file.
-// Returns the contents of the file in a DecoderBuffer.
-scoped_refptr<DecoderBuffer> ReadTestDataFile(const std::string& name);
-
-} // namespace media
-
-#endif // MEDIA_BASE_TEST_DATA_UTIL_H_
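Typical use of the helper above in a media unit test is a single call; the fixture file name below is a placeholder, not a claim about which files exist under media/test/data.

#include "media/base/decoder_buffer.h"
#include "media/base/test_data_util.h"

void LoadFixture() {
  // Reads media/test/data/<name> into a DecoderBuffer; CHECKs on failure.
  scoped_refptr<media::DecoderBuffer> data =
      media::ReadTestDataFile("example-fixture.webm");
  // The buffer's data and size accessors can now be fed to a parser under test.
}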
diff --git a/src/media/base/test_helpers.cc b/src/media/base/test_helpers.cc
deleted file mode 100644
index 862c9d4..0000000
--- a/src/media/base/test_helpers.cc
+++ /dev/null
@@ -1,99 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "media/base/test_helpers.h"
-
-#include "base/bind.h"
-#include "base/message_loop.h"
-#include "base/test/test_timeouts.h"
-#include "base/timer.h"
-#include "media/base/bind_to_loop.h"
-
-using ::testing::_;
-using ::testing::StrictMock;
-
-namespace media {
-
-// Utility mock for testing methods expecting Closures and PipelineStatusCBs.
-class MockCallback : public base::RefCountedThreadSafe<MockCallback> {
- public:
- MockCallback();
- MOCK_METHOD0(Run, void());
- MOCK_METHOD1(RunWithStatus, void(PipelineStatus));
-
- protected:
- friend class base::RefCountedThreadSafe<MockCallback>;
- virtual ~MockCallback();
-
- private:
- DISALLOW_COPY_AND_ASSIGN(MockCallback);
-};
-
-MockCallback::MockCallback() {}
-MockCallback::~MockCallback() {}
-
-base::Closure NewExpectedClosure() {
- StrictMock<MockCallback>* callback = new StrictMock<MockCallback>();
- EXPECT_CALL(*callback, Run());
- return base::Bind(&MockCallback::Run, callback);
-}
-
-PipelineStatusCB NewExpectedStatusCB(PipelineStatus status) {
- StrictMock<MockCallback>* callback = new StrictMock<MockCallback>();
- EXPECT_CALL(*callback, RunWithStatus(status));
- return base::Bind(&MockCallback::RunWithStatus, callback);
-}
-
-WaitableMessageLoopEvent::WaitableMessageLoopEvent()
- : message_loop_(MessageLoop::current()),
- signaled_(false),
- status_(PIPELINE_OK) {
- DCHECK(message_loop_);
-}
-
-WaitableMessageLoopEvent::~WaitableMessageLoopEvent() {}
-
-base::Closure WaitableMessageLoopEvent::GetClosure() {
- DCHECK_EQ(message_loop_, MessageLoop::current());
- return BindToLoop(message_loop_->message_loop_proxy(), base::Bind(
- &WaitableMessageLoopEvent::OnCallback, base::Unretained(this),
- PIPELINE_OK));
-}
-
-PipelineStatusCB WaitableMessageLoopEvent::GetPipelineStatusCB() {
- DCHECK_EQ(message_loop_, MessageLoop::current());
- return BindToLoop(message_loop_->message_loop_proxy(), base::Bind(
- &WaitableMessageLoopEvent::OnCallback, base::Unretained(this)));
-}
-
-void WaitableMessageLoopEvent::RunAndWait() {
- RunAndWaitForStatus(PIPELINE_OK);
-}
-
-void WaitableMessageLoopEvent::RunAndWaitForStatus(PipelineStatus expected) {
- DCHECK_EQ(message_loop_, MessageLoop::current());
- base::Timer timer(false, false);
- timer.Start(FROM_HERE, TestTimeouts::action_timeout(), base::Bind(
- &WaitableMessageLoopEvent::OnTimeout, base::Unretained(this)));
-
- DCHECK(!signaled_) << "Already signaled";
- message_loop_->Run();
- EXPECT_TRUE(signaled_);
- EXPECT_EQ(expected, status_);
-}
-
-void WaitableMessageLoopEvent::OnCallback(PipelineStatus status) {
- DCHECK_EQ(message_loop_, MessageLoop::current());
- signaled_ = true;
- status_ = status;
- message_loop_->QuitWhenIdle();
-}
-
-void WaitableMessageLoopEvent::OnTimeout() {
- DCHECK_EQ(message_loop_, MessageLoop::current());
- ADD_FAILURE() << "Timed out waiting for message loop to quit";
- message_loop_->QuitWhenIdle();
-}
-
-} // namespace media
diff --git a/src/media/base/test_helpers.h b/src/media/base/test_helpers.h
deleted file mode 100644
index 1c38694..0000000
--- a/src/media/base/test_helpers.h
+++ /dev/null
@@ -1,57 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef MEDIA_BASE_TEST_HELPERS_H_
-#define MEDIA_BASE_TEST_HELPERS_H_
-
-#include "base/callback.h"
-#include "media/base/pipeline_status.h"
-#include "testing/gmock/include/gmock/gmock.h"
-
-class MessageLoop;
-
-namespace media {
-
-// Return a callback that expects to be run once.
-base::Closure NewExpectedClosure();
-PipelineStatusCB NewExpectedStatusCB(PipelineStatus status);
-
-// Helper class for running a message loop until a callback has run. Useful for
-// testing classes that run on more than a single thread.
-//
-// Events are intended for single use and cannot be reset.
-class WaitableMessageLoopEvent {
- public:
- WaitableMessageLoopEvent();
- ~WaitableMessageLoopEvent();
-
- // Returns a thread-safe closure that will signal |this| when executed.
- base::Closure GetClosure();
- PipelineStatusCB GetPipelineStatusCB();
-
- // Runs the current message loop until |this| has been signaled.
- //
- // Fails the test if the timeout is reached.
- void RunAndWait();
-
- // Runs the current message loop until |this| has been signaled and asserts
- // that the |expected| status was received.
- //
- // Fails the test if the timeout is reached.
- void RunAndWaitForStatus(PipelineStatus expected);
-
- private:
- void OnCallback(PipelineStatus status);
- void OnTimeout();
-
- MessageLoop* message_loop_;
- bool signaled_;
- PipelineStatus status_;
-
- DISALLOW_COPY_AND_ASSIGN(WaitableMessageLoopEvent);
-};
-
-} // namespace media
-
-#endif // MEDIA_BASE_TEST_HELPERS_H_
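WaitableMessageLoopEvent, removed above, packages the common "spin the current message loop until a callback fires or a timeout elapses" pattern. A minimal sketch of driving it with the loop itself as the asynchronous producer; the test name is illustrative, and TestTimeouts is assumed to be initialized by the test suite:

#include "base/message_loop.h"
#include "media/base/test_helpers.h"
#include "testing/gtest/include/gtest/gtest.h"

namespace media {

TEST(WaitableMessageLoopEventSketch, SignalsWhenClosureRuns) {
  MessageLoop loop;  // Becomes MessageLoop::current() for this test.
  WaitableMessageLoopEvent event;

  // Any asynchronous producer works; here the loop itself runs the closure.
  loop.PostTask(FROM_HERE, event.GetClosure());

  // Spins the loop until the closure runs; fails the test if
  // TestTimeouts::action_timeout() elapses first.
  event.RunAndWait();

  // The PipelineStatusCB flavor works the same way, but also checks the
  // reported status via RunAndWaitForStatus(PIPELINE_OK).
}

}  // namespace media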
diff --git a/src/media/base/vector_math.cc b/src/media/base/vector_math.cc
deleted file mode 100644
index edd95cd..0000000
--- a/src/media/base/vector_math.cc
+++ /dev/null
@@ -1,59 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "media/base/vector_math.h"
-#include "media/base/vector_math_testing.h"
-
-#include "base/cpu.h"
-#include "base/logging.h"
-#include "build/build_config.h"
-
-#if defined(ARCH_CPU_X86_FAMILY) && defined(__SSE__)
-#include <xmmintrin.h>
-#endif
-
-namespace media {
-namespace vector_math {
-
-void FMAC(const float src[], float scale, int len, float dest[]) {
- // Ensure |src| and |dest| are 16-byte aligned.
- DCHECK_EQ(0u, reinterpret_cast<uintptr_t>(src) & (kRequiredAlignment - 1));
- DCHECK_EQ(0u, reinterpret_cast<uintptr_t>(dest) & (kRequiredAlignment - 1));
-
- // Rely on function level static initialization to keep VectorFMACProc
- // selection thread safe.
- typedef void (*VectorFMACProc)(const float src[], float scale, int len,
- float dest[]);
-#if defined(ARCH_CPU_X86_FAMILY) && defined(__SSE__)
- static const VectorFMACProc kVectorFMACProc =
- base::CPU().has_sse() ? FMAC_SSE : FMAC_C;
-#else
- static const VectorFMACProc kVectorFMACProc = FMAC_C;
-#endif
-
- return kVectorFMACProc(src, scale, len, dest);
-}
-
-void FMAC_C(const float src[], float scale, int len, float dest[]) {
- for (int i = 0; i < len; ++i)
- dest[i] += src[i] * scale;
-}
-
-#if defined(ARCH_CPU_X86_FAMILY) && defined(__SSE__)
-void FMAC_SSE(const float src[], float scale, int len, float dest[]) {
- __m128 m_scale = _mm_set_ps1(scale);
- int rem = len % 4;
- for (int i = 0; i < len - rem; i += 4) {
- _mm_store_ps(dest + i, _mm_add_ps(_mm_load_ps(dest + i),
- _mm_mul_ps(_mm_load_ps(src + i), m_scale)));
- }
-
- // Handle any remaining values that wouldn't fit in an SSE pass.
- if (rem)
- FMAC_C(src + len - rem, scale, rem, dest + len - rem);
-}
-#endif
-
-} // namespace vector_math
-} // namespace media
diff --git a/src/media/base/vector_math.h b/src/media/base/vector_math.h
deleted file mode 100644
index 10c3039..0000000
--- a/src/media/base/vector_math.h
+++ /dev/null
@@ -1,23 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef MEDIA_BASE_VECTOR_MATH_H_
-#define MEDIA_BASE_VECTOR_MATH_H_
-
-#include "media/base/media_export.h"
-
-namespace media {
-namespace vector_math {
-
-// Required alignment for inputs and outputs to all vector math functions
-enum { kRequiredAlignment = 16 };
-
-// Multiply each element of |src| (up to |len|) by |scale| and add to |dest|.
-// |src| and |dest| must be aligned by kRequiredAlignment.
-MEDIA_EXPORT void FMAC(const float src[], float scale, int len, float dest[]);
-
-} // namespace vector_math
-} // namespace media
-
-#endif // MEDIA_BASE_VECTOR_MATH_H_
diff --git a/src/media/base/vector_math_testing.h b/src/media/base/vector_math_testing.h
deleted file mode 100644
index d364b74..0000000
--- a/src/media/base/vector_math_testing.h
+++ /dev/null
@@ -1,22 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef MEDIA_BASE_VECTOR_MATH_TESTING_H_
-#define MEDIA_BASE_VECTOR_MATH_TESTING_H_
-
-#include "media/base/media_export.h"
-
-namespace media {
-namespace vector_math {
-
-// Optimized versions of the FMAC() function exposed for testing. See
-// vector_math.h for details.
-MEDIA_EXPORT void FMAC_C(const float src[], float scale, int len, float dest[]);
-MEDIA_EXPORT void FMAC_SSE(const float src[], float scale, int len,
- float dest[]);
-
-} // namespace vector_math
-} // namespace media
-
-#endif // MEDIA_BASE_VECTOR_MATH_TESTING_H_
diff --git a/src/media/base/vector_math_unittest.cc b/src/media/base/vector_math_unittest.cc
deleted file mode 100644
index 153378e..0000000
--- a/src/media/base/vector_math_unittest.cc
+++ /dev/null
@@ -1,155 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// MSVC++ requires this to be set before any other includes to get M_PI.
-#define _USE_MATH_DEFINES
-#include <cmath>
-
-#include "base/command_line.h"
-#include "base/memory/aligned_memory.h"
-#include "base/memory/scoped_ptr.h"
-#include "base/string_number_conversions.h"
-#include "base/time.h"
-#include "media/base/vector_math.h"
-#include "media/base/vector_math_testing.h"
-#include "testing/gtest/include/gtest/gtest.h"
-
-using base::TimeTicks;
-using std::fill;
-
-// Command line switch for runtime adjustment of benchmark iterations.
-static const char kBenchmarkIterations[] = "vector-math-iterations";
-static const int kDefaultIterations = 10;
-
-// Default test values.
-static const float kScale = 0.5;
-static const float kInputFillValue = 1.0;
-static const float kOutputFillValue = 3.0;
-
-namespace media {
-
-class VectorMathTest : public testing::Test {
- public:
- static const int kVectorSize = 8192;
-
- VectorMathTest() {
- // Initialize input and output vectors.
- input_vector.reset(static_cast<float*>(base::AlignedAlloc(
- sizeof(float) * kVectorSize, vector_math::kRequiredAlignment)));
- output_vector.reset(static_cast<float*>(base::AlignedAlloc(
- sizeof(float) * kVectorSize, vector_math::kRequiredAlignment)));
- }
-
- void FillTestVectors(float input, float output) {
- // Setup input and output vectors.
- fill(input_vector.get(), input_vector.get() + kVectorSize, input);
- fill(output_vector.get(), output_vector.get() + kVectorSize, output);
- }
-
- void VerifyOutput(float value) {
- for (int i = 0; i < kVectorSize; ++i)
- ASSERT_FLOAT_EQ(output_vector.get()[i], value);
- }
-
- int BenchmarkIterations() {
- int vector_math_iterations = kDefaultIterations;
- std::string iterations(
- CommandLine::ForCurrentProcess()->GetSwitchValueASCII(
- kBenchmarkIterations));
- if (!iterations.empty())
- base::StringToInt(iterations, &vector_math_iterations);
- return vector_math_iterations;
- }
-
- protected:
- int benchmark_iterations;
- scoped_ptr_malloc<float, base::ScopedPtrAlignedFree> input_vector;
- scoped_ptr_malloc<float, base::ScopedPtrAlignedFree> output_vector;
-
- DISALLOW_COPY_AND_ASSIGN(VectorMathTest);
-};
-
-// Ensure each optimized vector_math::FMAC() method returns the same value.
-TEST_F(VectorMathTest, FMAC) {
- static const float kResult = kInputFillValue * kScale + kOutputFillValue;
-
- {
- SCOPED_TRACE("FMAC");
- FillTestVectors(kInputFillValue, kOutputFillValue);
- vector_math::FMAC(
- input_vector.get(), kScale, kVectorSize, output_vector.get());
- VerifyOutput(kResult);
- }
-
- {
- SCOPED_TRACE("FMAC_C");
- FillTestVectors(kInputFillValue, kOutputFillValue);
- vector_math::FMAC_C(
- input_vector.get(), kScale, kVectorSize, output_vector.get());
- VerifyOutput(kResult);
- }
-
-#if defined(ARCH_CPU_X86_FAMILY) && defined(__SSE__)
- {
- SCOPED_TRACE("FMAC_SSE");
- FillTestVectors(kInputFillValue, kOutputFillValue);
- vector_math::FMAC_SSE(
- input_vector.get(), kScale, kVectorSize, output_vector.get());
- VerifyOutput(kResult);
- }
-#endif
-}
-
-// Benchmark for each optimized vector_math::FMAC() method. Original benchmarks
-// were run with --vector-math-iterations=200000.
-TEST_F(VectorMathTest, FMACBenchmark) {
- static const int kBenchmarkIterations = BenchmarkIterations();
-
- printf("Benchmarking %d iterations:\n", kBenchmarkIterations);
-
- // Benchmark FMAC_C().
- FillTestVectors(kInputFillValue, kOutputFillValue);
- TimeTicks start = TimeTicks::HighResNow();
- for (int i = 0; i < kBenchmarkIterations; ++i) {
- vector_math::FMAC_C(
- input_vector.get(), kScale, kVectorSize, output_vector.get());
- }
- double total_time_c_ms = (TimeTicks::HighResNow() - start).InMillisecondsF();
- printf("FMAC_C took %.2fms.\n", total_time_c_ms);
-
-#if defined(ARCH_CPU_X86_FAMILY) && defined(__SSE__)
- // Benchmark FMAC_SSE() with unaligned size.
- ASSERT_NE((kVectorSize - 1) % (vector_math::kRequiredAlignment /
- sizeof(float)), 0U);
- FillTestVectors(kInputFillValue, kOutputFillValue);
- start = TimeTicks::HighResNow();
- for (int j = 0; j < kBenchmarkIterations; ++j) {
- vector_math::FMAC_SSE(
- input_vector.get(), kScale, kVectorSize - 1, output_vector.get());
- }
- double total_time_sse_unaligned_ms =
- (TimeTicks::HighResNow() - start).InMillisecondsF();
- printf("FMAC_SSE (unaligned size) took %.2fms; which is %.2fx faster than"
- " FMAC_C.\n", total_time_sse_unaligned_ms,
- total_time_c_ms / total_time_sse_unaligned_ms);
-
- // Benchmark FMAC_SSE() with aligned size.
- ASSERT_EQ(kVectorSize % (vector_math::kRequiredAlignment / sizeof(float)),
- 0U);
- FillTestVectors(kInputFillValue, kOutputFillValue);
- start = TimeTicks::HighResNow();
- for (int j = 0; j < kBenchmarkIterations; ++j) {
- vector_math::FMAC_SSE(
- input_vector.get(), kScale, kVectorSize, output_vector.get());
- }
- double total_time_sse_aligned_ms =
- (TimeTicks::HighResNow() - start).InMillisecondsF();
- printf("FMAC_SSE (aligned size) took %.2fms; which is %.2fx faster than"
- " FMAC_C and %.2fx faster than FMAC_SSE (unaligned size).\n",
- total_time_sse_aligned_ms, total_time_c_ms / total_time_sse_aligned_ms,
- total_time_sse_unaligned_ms / total_time_sse_aligned_ms);
-#endif
-}
-
-} // namespace media
diff --git a/src/media/base/video_decoder.cc b/src/media/base/video_decoder.cc
deleted file mode 100644
index 4c7c4b7..0000000
--- a/src/media/base/video_decoder.cc
+++ /dev/null
@@ -1,17 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "media/base/video_decoder.h"
-
-namespace media {
-
-VideoDecoder::VideoDecoder() {}
-
-VideoDecoder::~VideoDecoder() {}
-
-bool VideoDecoder::HasAlpha() const {
- return false;
-}
-
-} // namespace media
diff --git a/src/media/base/video_decoder.h b/src/media/base/video_decoder.h
deleted file mode 100644
index fc821e7..0000000
--- a/src/media/base/video_decoder.h
+++ /dev/null
@@ -1,95 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef MEDIA_BASE_VIDEO_DECODER_H_
-#define MEDIA_BASE_VIDEO_DECODER_H_
-
-#include "base/callback.h"
-#include "base/memory/ref_counted.h"
-#include "media/base/pipeline_status.h"
-#include "media/base/media_export.h"
-#include "ui/gfx/size.h"
-
-namespace media {
-
-class DemuxerStream;
-class VideoFrame;
-
-class MEDIA_EXPORT VideoDecoder
- : public base::RefCountedThreadSafe<VideoDecoder> {
- public:
- // Status codes for read operations on VideoDecoder.
- enum Status {
- kOk, // Everything went as planned.
- kDecodeError, // Decoding error happened.
- kDecryptError // Decrypting error happened.
- };
-
- // Initializes a VideoDecoder with the given DemuxerStream, executing the
- // |status_cb| upon completion.
- // |statistics_cb| is used to update the global pipeline statistics.
- // Note: No VideoDecoder calls should be made before |status_cb| is executed.
- virtual void Initialize(const scoped_refptr<DemuxerStream>& stream,
- const PipelineStatusCB& status_cb,
- const StatisticsCB& statistics_cb) = 0;
-
- // Requests a frame to be decoded. The status of the decoder and decoded frame
- // are returned via the provided callback. Only one read may be in flight at
- // any given time.
- //
- // Implementations guarantee that the callback will not be called from within
- // this method.
- //
- // If the returned status is not kOk, some error has occurred in the video
- // decoder. In this case, the returned frame should always be NULL.
- //
- // Otherwise, the video decoder is in good shape. In this case, Non-NULL
- // frames contain decoded video data or may indicate the end of the stream.
- // NULL video frames indicate an aborted read. This can happen if the
- // DemuxerStream gets flushed and doesn't have any more data to return.
- typedef base::Callback<void(Status, const scoped_refptr<VideoFrame>&)> ReadCB;
- virtual void Read(const ReadCB& read_cb) = 0;
-
- // Resets decoder state, fulfilling all pending ReadCB and dropping extra
- // queued decoded data. After this call, the decoder is back to an initialized
- // clean state.
- // Note: No VideoDecoder calls should be made before |closure| is executed.
- virtual void Reset(const base::Closure& closure) = 0;
-
- // Stops decoder, fires any pending callbacks and sets the decoder to an
- // uninitialized state. A VideoDecoder cannot be re-initialized after it has
- // been stopped.
- // Note that if Initialize() has been called, Stop() must be called and
- // complete before deleting the decoder.
- virtual void Stop(const base::Closure& closure) = 0;
-
- // Returns true if the output format has an alpha channel. Most formats do not
- // have alpha so the default is false. Override and return true for decoders
- // that return formats with an alpha channel.
- virtual bool HasAlpha() const;
-
-#if defined(__LB_SHELL__) || defined(COBALT)
- // Notify the decoder that we are going to underflow if the decoder keeps
- // working at the current speed. The decoder should try to decode faster or
- // even drop frames if possible.
- virtual void NearlyUnderflow() {}
- // Notify the decoder that we have enough frames cached and no longer need to
- // sacrifice quality for speed. Note that it doesn't mean that the decoder no
- // longer needs to decode more frames. The decoding of frames is controlled
- // by Read() and the decoder should keep decoding when there is pending read.
- virtual void HaveEnoughFrames() {}
-#endif // defined(__LB_SHELL__) || defined(COBALT)
-
- protected:
- friend class base::RefCountedThreadSafe<VideoDecoder>;
- virtual ~VideoDecoder();
- VideoDecoder();
-
- private:
- DISALLOW_COPY_AND_ASSIGN(VideoDecoder);
-};
-
-} // namespace media
-
-#endif // MEDIA_BASE_VIDEO_DECODER_H_
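The Read() contract above (one read in flight at a time, a NULL frame meaning an aborted read, an end-of-stream frame terminating the sequence) is easiest to see as a pull loop. A rough client sketch, not taken from the removed sources; FramePump and its renderer hand-off are hypothetical:

#include "base/bind.h"
#include "base/memory/ref_counted.h"
#include "media/base/video_decoder.h"
#include "media/base/video_frame.h"

// Hypothetical client of the VideoDecoder interface removed above.
class FramePump {
 public:
  explicit FramePump(const scoped_refptr<media::VideoDecoder>& decoder)
      : decoder_(decoder) {}

  void Start() { ReadNext(); }

 private:
  void ReadNext() {
    // Only one read may be outstanding at any given time.
    decoder_->Read(base::Bind(&FramePump::OnFrame, base::Unretained(this)));
  }

  void OnFrame(media::VideoDecoder::Status status,
               const scoped_refptr<media::VideoFrame>& frame) {
    if (status != media::VideoDecoder::kOk || !frame)
      return;  // Decode error, or the read was aborted by a flush/reset.
    if (frame->IsEndOfStream())
      return;  // No further frames will be produced.
    // ... hand |frame| to the renderer here ...
    ReadNext();
  }

  scoped_refptr<media::VideoDecoder> decoder_;
};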
diff --git a/src/media/base/video_decoder_config.cc b/src/media/base/video_decoder_config.cc
deleted file mode 100644
index 7d0055f..0000000
--- a/src/media/base/video_decoder_config.cc
+++ /dev/null
@@ -1,206 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "media/base/video_decoder_config.h"
-
-#include "base/logging.h"
-#include "base/metrics/histogram.h"
-#include "media/base/video_types.h"
-
-namespace media {
-
-VideoDecoderConfig::VideoDecoderConfig()
- : codec_(kUnknownVideoCodec),
- profile_(VIDEO_CODEC_PROFILE_UNKNOWN),
- format_(VideoFrame::INVALID),
- extra_data_size_(0),
- is_encrypted_(false),
- color_space_(COLOR_SPACE_UNSPECIFIED) {}
-
-VideoDecoderConfig::VideoDecoderConfig(VideoCodec codec,
- VideoCodecProfile profile,
- VideoFrame::Format format,
- ColorSpace color_space,
- const gfx::Size& coded_size,
- const gfx::Rect& visible_rect,
- const gfx::Size& natural_size,
- const uint8* extra_data,
- size_t extra_data_size,
- bool is_encrypted) {
- Initialize(codec, profile, format, color_space, coded_size, visible_rect,
- natural_size, extra_data, extra_data_size, is_encrypted, true);
-}
-
-VideoDecoderConfig::~VideoDecoderConfig() {}
-
-// Some videos just want to watch the world burn, with a height of 0; cap the
-// "infinite" aspect ratio resulting.
-static const int kInfiniteRatio = 99999;
-
-// Common aspect ratios (multiplied by 100 and truncated) used for histogramming
-// video sizes. These were taken on 20111103 from
-// http://wikipedia.org/wiki/Aspect_ratio_(image)#Previous_and_currently_used_aspect_ratios
-static const int kCommonAspectRatios100[] = {
- 100, 115, 133, 137, 143, 150, 155, 160, 166, 175, 177, 185, 200, 210, 220,
- 221, 235, 237, 240, 255, 259, 266, 276, 293, 400, 1200, kInfiniteRatio,
-};
-
-template<class T> // T has int width() & height() methods.
-static void UmaHistogramAspectRatio(const char* name, const T& size) {
- UMA_HISTOGRAM_CUSTOM_ENUMERATION(
- name,
- // Intentionally use integer division to truncate the result.
- size.height() ? (size.width() * 100) / size.height() : kInfiniteRatio,
- base::CustomHistogram::ArrayToCustomRanges(
- kCommonAspectRatios100, arraysize(kCommonAspectRatios100)));
-}
-
-void VideoDecoderConfig::Initialize(VideoCodec codec,
- VideoCodecProfile profile,
- VideoFrame::Format format,
- ColorSpace color_space,
- const gfx::Size& coded_size,
- const gfx::Rect& visible_rect,
- const gfx::Size& natural_size,
- const uint8* extra_data,
- size_t extra_data_size,
- bool is_encrypted,
- bool record_stats) {
- CHECK((extra_data_size != 0) == (extra_data != NULL));
-
- if (record_stats) {
- UMA_HISTOGRAM_ENUMERATION("Media.VideoCodec", codec, kVideoCodecMax + 1);
- // Drop UNKNOWN because U_H_E() uses one bucket for all values less than 1.
- if (profile >= 0) {
- UMA_HISTOGRAM_ENUMERATION("Media.VideoCodecProfile", profile,
- VIDEO_CODEC_PROFILE_MAX + 1);
- }
- UMA_HISTOGRAM_COUNTS_10000("Media.VideoCodedWidth", coded_size.width());
- UmaHistogramAspectRatio("Media.VideoCodedAspectRatio", coded_size);
- UMA_HISTOGRAM_COUNTS_10000("Media.VideoVisibleWidth", visible_rect.width());
- UmaHistogramAspectRatio("Media.VideoVisibleAspectRatio", visible_rect);
- }
-
- codec_ = codec;
- profile_ = profile;
- format_ = format;
- color_space_ = color_space;
- coded_size_ = coded_size;
- visible_rect_ = visible_rect;
- natural_size_ = natural_size;
- extra_data_size_ = extra_data_size;
-
- if (extra_data_size_ > 0) {
- extra_data_.reset(new uint8[extra_data_size_]);
- memcpy(extra_data_.get(), extra_data, extra_data_size_);
- } else {
- extra_data_.reset();
- }
-
- is_encrypted_ = is_encrypted;
-
- switch (color_space) {
- case COLOR_SPACE_JPEG:
- webm_color_metadata_.color_space = gfx::ColorSpace::CreateJpeg();
- break;
- case COLOR_SPACE_HD_REC709:
- webm_color_metadata_.color_space = gfx::ColorSpace::CreateREC709();
- break;
- case COLOR_SPACE_SD_REC601:
- webm_color_metadata_.color_space = gfx::ColorSpace::CreateREC601();
- break;
- case COLOR_SPACE_UNSPECIFIED:
- break;
- default:
- NOTREACHED();
- break;
- }
-}
-
-void VideoDecoderConfig::CopyFrom(const VideoDecoderConfig& video_config) {
- Initialize(video_config.codec(), video_config.profile(),
- video_config.format(), video_config.color_space_,
- video_config.coded_size(), video_config.visible_rect(),
- video_config.natural_size(), video_config.extra_data(),
- video_config.extra_data_size(), video_config.is_encrypted(),
- false);
- webm_color_metadata_ = video_config.webm_color_metadata_;
-}
-
-bool VideoDecoderConfig::IsValidConfig() const {
- return codec_ != kUnknownVideoCodec &&
- natural_size_.width() > 0 &&
- natural_size_.height() > 0 &&
- VideoFrame::IsValidConfig(format_, coded_size_, visible_rect_,
- natural_size_);
-}
-
-bool VideoDecoderConfig::Matches(const VideoDecoderConfig& config) const {
- return ((codec() == config.codec()) && (format() == config.format()) &&
- (webm_color_metadata_ == config.webm_color_metadata()) &&
- (profile() == config.profile()) &&
- (coded_size() == config.coded_size()) &&
- (visible_rect() == config.visible_rect()) &&
- (natural_size() == config.natural_size()) &&
- (extra_data_size() == config.extra_data_size()) &&
- (!extra_data() ||
- !memcmp(extra_data(), config.extra_data(), extra_data_size())) &&
- (is_encrypted() == config.is_encrypted()));
-}
-
-std::string VideoDecoderConfig::AsHumanReadableString() const {
- std::ostringstream s;
- s << "codec: " << codec()
- << " format: " << format()
- << " profile: " << profile()
- << " coded size: [" << coded_size().width()
- << "," << coded_size().height() << "]"
- << " visible rect: [" << visible_rect().x()
- << "," << visible_rect().y()
- << "," << visible_rect().width()
- << "," << visible_rect().height() << "]"
- << " natural size: [" << natural_size().width()
- << "," << natural_size().height() << "]"
- << " has extra data? " << (extra_data() ? "true" : "false")
- << " encrypted? " << (is_encrypted() ? "true" : "false");
- return s.str();
-}
-
-VideoCodec VideoDecoderConfig::codec() const {
- return codec_;
-}
-
-VideoCodecProfile VideoDecoderConfig::profile() const {
- return profile_;
-}
-
-VideoFrame::Format VideoDecoderConfig::format() const {
- return format_;
-}
-
-gfx::Size VideoDecoderConfig::coded_size() const {
- return coded_size_;
-}
-
-gfx::Rect VideoDecoderConfig::visible_rect() const {
- return visible_rect_;
-}
-
-gfx::Size VideoDecoderConfig::natural_size() const {
- return natural_size_;
-}
-
-uint8* VideoDecoderConfig::extra_data() const {
- return extra_data_.get();
-}
-
-size_t VideoDecoderConfig::extra_data_size() const {
- return extra_data_size_;
-}
-
-bool VideoDecoderConfig::is_encrypted() const {
- return is_encrypted_;
-}
-
-} // namespace media
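UmaHistogramAspectRatio() above buckets width/height as an integer ratio scaled by 100 and truncated, with a sentinel bucket for zero height. The arithmetic, restated as a standalone helper (the function name is illustrative; kInfiniteRatio and the truncation rule come from the removed code):

// 1920x1080 -> (1920 * 100) / 1080 = 177 (truncated), the 16:9 bucket.
// 640x480   -> (640 * 100) / 480   = 133, the 4:3 bucket.
// height 0  -> 99999, the kInfiniteRatio sentinel bucket.
int AspectRatioBucket(int width, int height) {
  const int kInfiniteRatio = 99999;
  return height ? (width * 100) / height : kInfiniteRatio;
}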
diff --git a/src/media/base/video_decoder_config.h b/src/media/base/video_decoder_config.h
deleted file mode 100644
index f5b6122..0000000
--- a/src/media/base/video_decoder_config.h
+++ /dev/null
@@ -1,182 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef MEDIA_BASE_VIDEO_DECODER_CONFIG_H_
-#define MEDIA_BASE_VIDEO_DECODER_CONFIG_H_
-
-#include <string>
-
-#include "base/basictypes.h"
-#include "base/memory/scoped_ptr.h"
-#include "media/base/color_space.h"
-#include "media/base/hdr_metadata.h"
-#include "media/base/media_export.h"
-#include "media/base/video_frame.h"
-#include "media/base/video_types.h"
-#include "media/webm/webm_colour_parser.h"
-#include "ui/gfx/rect.h"
-#include "ui/gfx/size.h"
-
-namespace media {
-
-enum VideoCodec {
- // These values are histogrammed over time; do not change their ordinal
- // values. When deleting a codec replace it with a dummy value; when adding a
- // codec, do so at the bottom (and update kVideoCodecMax).
- kUnknownVideoCodec = 0,
- kCodecH264,
- kCodecVC1,
- kCodecMPEG2,
- kCodecMPEG4,
- kCodecTheora,
- kCodecVP8,
- kCodecVP9,
- // DO NOT ADD RANDOM VIDEO CODECS!
- //
- // The only acceptable time to add a new codec is if there is production code
- // that uses said codec in the same CL.
-
- kVideoCodecMax = kCodecVP9 // Must equal the last "real" codec above.
-};
-
-// Video stream profile. This *must* match PP_VideoDecoder_Profile.
-// (enforced in webkit/plugins/ppapi/ppb_video_decoder_impl.cc)
-enum VideoCodecProfile {
- // Keep the values in this enum unique, as they imply format (h.264 vs. VP8,
- // for example), and keep the values for a particular format grouped
- // together for clarity.
- VIDEO_CODEC_PROFILE_UNKNOWN = -1,
- H264PROFILE_MIN = 0,
- H264PROFILE_BASELINE = H264PROFILE_MIN,
- H264PROFILE_MAIN = 1,
- H264PROFILE_EXTENDED = 2,
- H264PROFILE_HIGH = 3,
- H264PROFILE_HIGH10PROFILE = 4,
- H264PROFILE_HIGH422PROFILE = 5,
- H264PROFILE_HIGH444PREDICTIVEPROFILE = 6,
- H264PROFILE_SCALABLEBASELINE = 7,
- H264PROFILE_SCALABLEHIGH = 8,
- H264PROFILE_STEREOHIGH = 9,
- H264PROFILE_MULTIVIEWHIGH = 10,
- H264PROFILE_MAX = H264PROFILE_MULTIVIEWHIGH,
- VP8PROFILE_MIN = 11,
- VP8PROFILE_MAIN = VP8PROFILE_MIN,
- VP8PROFILE_MAX = VP8PROFILE_MAIN,
- VP9PROFILE_MIN = 12,
- VP9PROFILE_MAIN = VP9PROFILE_MIN,
- VP9PROFILE_MAX = VP9PROFILE_MAIN,
- VIDEO_CODEC_PROFILE_MAX = VP9PROFILE_MAX,
-};
-
-class MEDIA_EXPORT VideoDecoderConfig {
- public:
- // Constructs an uninitialized object. Clients should call Initialize() with
- // appropriate values before using.
- VideoDecoderConfig();
-
- // Constructs an initialized object. It is acceptable to pass in NULL for
- // |extra_data|, otherwise the memory is copied.
- VideoDecoderConfig(VideoCodec codec,
- VideoCodecProfile profile,
- VideoFrame::Format format,
- ColorSpace color_space,
- const gfx::Size& coded_size,
- const gfx::Rect& visible_rect,
- const gfx::Size& natural_size,
- const uint8* extra_data,
- size_t extra_data_size,
- bool is_encrypted);
-
- ~VideoDecoderConfig();
-
- // Resets the internal state of this object.
- void Initialize(VideoCodec codec,
- VideoCodecProfile profile,
- VideoFrame::Format format,
- ColorSpace color_space,
- const gfx::Size& coded_size,
- const gfx::Rect& visible_rect,
- const gfx::Size& natural_size,
- const uint8* extra_data,
- size_t extra_data_size,
- bool is_encrypted,
- bool record_stats);
-
- // Deep copies |video_config|.
- void CopyFrom(const VideoDecoderConfig& video_config);
-
- // Returns true if this object has appropriate configuration values, false
- // otherwise.
- bool IsValidConfig() const;
-
- // Returns true if all fields in |config| match this config.
- // Note: The contents of |extra_data_| are compared not the raw pointers.
- bool Matches(const VideoDecoderConfig& config) const;
-
- // Returns a human-readable string describing |*this|. For debugging & test
- // output only.
- std::string AsHumanReadableString() const;
-
- VideoCodec codec() const;
- VideoCodecProfile profile() const;
-
- // Video format used to determine YUV buffer sizes.
- VideoFrame::Format format() const;
-
- // Width and height of video frame immediately post-decode. Not all pixels
- // in this region are valid.
- gfx::Size coded_size() const;
-
- // Region of |coded_size_| that is visible.
- gfx::Rect visible_rect() const;
-
- // Final visible width and height of a video frame with aspect ratio taken
- // into account.
- gfx::Size natural_size() const;
-
- // Optional byte data required to initialize video decoders, such as H.264
- // AVCC data.
- uint8* extra_data() const;
- size_t extra_data_size() const;
-
- // Whether the video stream is potentially encrypted.
- // Note that in a potentially encrypted video stream, individual buffers
- // can be encrypted or not encrypted.
- bool is_encrypted() const;
-
- void set_webm_color_metadata(const WebMColorMetadata& webm_color_metadata) {
- webm_color_metadata_ = webm_color_metadata;
- }
-
- const WebMColorMetadata& webm_color_metadata() const {
- return webm_color_metadata_;
- }
-
- const ColorSpace& color_space() const { return color_space_; }
-
- private:
- VideoCodec codec_;
- VideoCodecProfile profile_;
-
- VideoFrame::Format format_;
-
- // TODO(servolk): Deprecated, use color_space_info_ instead.
- ColorSpace color_space_;
-
- gfx::Size coded_size_;
- gfx::Rect visible_rect_;
- gfx::Size natural_size_;
-
- scoped_array<uint8> extra_data_;
- size_t extra_data_size_;
-
- bool is_encrypted_;
-
- WebMColorMetadata webm_color_metadata_;
- DISALLOW_COPY_AND_ASSIGN(VideoDecoderConfig);
-};
-
-} // namespace media
-
-#endif // MEDIA_BASE_VIDEO_DECODER_CONFIG_H_
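A minimal construction sketch for the config class above, assuming a hypothetical 1080p VP9 stream with no codec extra data; the sizes are illustrative, and IsValidConfig() applies the dimension limits enforced by VideoFrame::IsValidConfig():

#include "base/logging.h"
#include "media/base/video_decoder_config.h"

void DescribeVp9Stream() {
  media::VideoDecoderConfig config(
      media::kCodecVP9, media::VP9PROFILE_MAIN,
      media::VideoFrame::YV12, media::COLOR_SPACE_HD_REC709,
      gfx::Size(1920, 1088),        // coded size, macroblock aligned
      gfx::Rect(0, 0, 1920, 1080),  // visible portion of the coded size
      gfx::Size(1920, 1080),        // natural size after aspect ratio
      NULL, 0,                      // no extra data
      false);                       // not encrypted
  CHECK(config.IsValidConfig());
  DLOG(INFO) << config.AsHumanReadableString();
}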
diff --git a/src/media/base/video_frame.cc b/src/media/base/video_frame.cc
deleted file mode 100644
index 581246c..0000000
--- a/src/media/base/video_frame.cc
+++ /dev/null
@@ -1,342 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "media/base/video_frame.h"
-
-#include <algorithm>
-
-#include "base/bind.h"
-#include "base/callback_helpers.h"
-#include "base/logging.h"
-#include "base/memory/aligned_memory.h"
-#include "base/string_piece.h"
-#include "media/base/limits.h"
-#include "media/base/video_util.h"
-
-namespace media {
-
-// static
-scoped_refptr<VideoFrame> VideoFrame::CreateFrame(
- VideoFrame::Format format,
- const gfx::Size& coded_size,
- const gfx::Rect& visible_rect,
- const gfx::Size& natural_size,
- base::TimeDelta timestamp) {
- DCHECK(IsValidConfig(format, coded_size, visible_rect, natural_size));
- scoped_refptr<VideoFrame> frame(new VideoFrame(
- format, coded_size, visible_rect, natural_size, timestamp));
- switch (format) {
- case VideoFrame::RGB32:
- frame->AllocateRGB(4u);
- break;
- case VideoFrame::YV12:
- case VideoFrame::YV16:
- frame->AllocateYUV();
- break;
- default:
- LOG(FATAL) << "Unsupported frame format: " << format;
- }
- return frame;
-}
-
-// static
-bool VideoFrame::IsValidConfig(VideoFrame::Format format,
- const gfx::Size& coded_size,
- const gfx::Rect& visible_rect,
- const gfx::Size& natural_size) {
- return (format != VideoFrame::INVALID &&
- !coded_size.IsEmpty() &&
- coded_size.GetArea() <= limits::kMaxCanvas &&
- coded_size.width() <= limits::kMaxDimension &&
- coded_size.height() <= limits::kMaxDimension &&
- !visible_rect.IsEmpty() &&
- visible_rect.x() >= 0 && visible_rect.y() >= 0 &&
- visible_rect.right() <= coded_size.width() &&
- visible_rect.bottom() <= coded_size.height() &&
- !natural_size.IsEmpty() &&
- natural_size.GetArea() <= limits::kMaxCanvas &&
- natural_size.width() <= limits::kMaxDimension &&
- natural_size.height() <= limits::kMaxDimension);
-}
-
-// static
-scoped_refptr<VideoFrame> VideoFrame::WrapNativeTexture(
- uintptr_t texture_id,
- uint32 texture_target,
- const gfx::Size& coded_size,
- const gfx::Rect& visible_rect,
- const gfx::Size& natural_size,
- base::TimeDelta timestamp,
- const ReadPixelsCB& read_pixels_cb,
- const base::Closure& no_longer_needed_cb) {
- scoped_refptr<VideoFrame> frame(new VideoFrame(
- NATIVE_TEXTURE, coded_size, visible_rect, natural_size, timestamp));
- frame->texture_id_ = texture_id;
- frame->texture_target_ = texture_target;
- frame->read_pixels_cb_ = read_pixels_cb;
- frame->no_longer_needed_cb_ = no_longer_needed_cb;
- return frame;
-}
-
-void VideoFrame::ReadPixelsFromNativeTexture(void* pixels) {
- DCHECK_EQ(format_, NATIVE_TEXTURE);
- if (!read_pixels_cb_.is_null())
- read_pixels_cb_.Run(pixels);
-}
-
-// static
-scoped_refptr<VideoFrame> VideoFrame::WrapExternalYuvData(
- Format format,
- const gfx::Size& coded_size,
- const gfx::Rect& visible_rect,
- const gfx::Size& natural_size,
- int32 y_stride, int32 u_stride, int32 v_stride,
- uint8* y_data, uint8* u_data, uint8* v_data,
- base::TimeDelta timestamp,
- const base::Closure& no_longer_needed_cb) {
- DCHECK(format == YV12 || format == YV16 || format == I420) << format;
- scoped_refptr<VideoFrame> frame(new VideoFrame(
- format, coded_size, visible_rect, natural_size, timestamp));
- frame->strides_[kYPlane] = y_stride;
- frame->strides_[kUPlane] = u_stride;
- frame->strides_[kVPlane] = v_stride;
- frame->data_[kYPlane] = y_data;
- frame->data_[kUPlane] = u_data;
- frame->data_[kVPlane] = v_data;
- frame->no_longer_needed_cb_ = no_longer_needed_cb;
- return frame;
-}
-
-// static
-scoped_refptr<VideoFrame> VideoFrame::CreateEmptyFrame() {
- return new VideoFrame(
- VideoFrame::EMPTY, gfx::Size(), gfx::Rect(), gfx::Size(),
- base::TimeDelta());
-}
-
-// static
-scoped_refptr<VideoFrame> VideoFrame::CreateColorFrame(
- const gfx::Size& size,
- uint8 y, uint8 u, uint8 v,
- base::TimeDelta timestamp) {
- DCHECK(IsValidConfig(VideoFrame::YV12, size, gfx::Rect(size), size));
- scoped_refptr<VideoFrame> frame = VideoFrame::CreateFrame(
- VideoFrame::YV12, size, gfx::Rect(size), size, timestamp);
- FillYUV(frame, y, u, v);
- return frame;
-}
-
-// static
-scoped_refptr<VideoFrame> VideoFrame::CreateBlackFrame(const gfx::Size& size) {
- const uint8 kBlackY = 0x00;
- const uint8 kBlackUV = 0x80;
- const base::TimeDelta kZero;
- return CreateColorFrame(size, kBlackY, kBlackUV, kBlackUV, kZero);
-}
-
-#if defined(__LB_SHELL__) || defined(COBALT)
-// static
-scoped_refptr<VideoFrame> VideoFrame::CreatePunchOutFrame(
- const gfx::Size& size) {
- scoped_refptr<VideoFrame> frame(new VideoFrame(
- VideoFrame::PUNCH_OUT, gfx::Size(size), gfx::Rect(size), gfx::Size(size),
- base::TimeDelta()));
- return frame;
-}
-#endif
-
-static inline size_t RoundUp(size_t value, size_t alignment) {
- // Check that |alignment| is a power of 2.
- DCHECK((alignment + (alignment - 1)) == (alignment | (alignment - 1)));
- return ((value + (alignment - 1)) & ~(alignment-1));
-}
-
-// Release data allocated by AllocateRGB() or AllocateYUV().
-static void ReleaseData(uint8* data) {
- DCHECK(data);
- base::AlignedFree(data);
-}
-
-void VideoFrame::AllocateRGB(size_t bytes_per_pixel) {
- // Round up to align at least at a 16-byte boundary for each row.
- // This is sufficient for MMX and SSE2 reads (movq/movdqa).
- size_t bytes_per_row = RoundUp(coded_size_.width(),
- kFrameSizeAlignment) * bytes_per_pixel;
- size_t aligned_height = RoundUp(coded_size_.height(), kFrameSizeAlignment);
- strides_[VideoFrame::kRGBPlane] = bytes_per_row;
- data_[VideoFrame::kRGBPlane] = reinterpret_cast<uint8*>(
- base::AlignedAlloc(bytes_per_row * aligned_height + kFrameSizePadding,
- kFrameAddressAlignment));
- no_longer_needed_cb_ = base::Bind(&ReleaseData, data_[VideoFrame::kRGBPlane]);
- DCHECK(!(reinterpret_cast<intptr_t>(data_[VideoFrame::kRGBPlane]) & 7));
- COMPILE_ASSERT(0 == VideoFrame::kRGBPlane, RGB_data_must_be_index_0);
-}
-
-void VideoFrame::AllocateYUV() {
- DCHECK(format_ == VideoFrame::YV12 || format_ == VideoFrame::YV16);
- // Align Y rows at least at 16 byte boundaries. The stride for both
- // YV12 and YV16 is 1/2 of the stride of Y. For YV12, every row of bytes for
- // U and V applies to two rows of Y (one byte of UV for 4 bytes of Y), so in
- // the case of YV12 the strides are identical for the same width surface, but
- // the number of bytes allocated for YV12 is 1/2 the amount for U & V as
- // YV16. We also round the height of the surface allocated to be an even
- // number to avoid any potential of faulting by code that attempts to access
- // the Y values of the final row, but assumes that the last row of U & V
- // applies to a full two rows of Y.
- size_t y_stride = RoundUp(row_bytes(VideoFrame::kYPlane),
- kFrameSizeAlignment);
- size_t uv_stride = RoundUp(row_bytes(VideoFrame::kUPlane),
- kFrameSizeAlignment);
- // The *2 here is because some formats (e.g. h264) allow interlaced coding,
- // and then the size needs to be a multiple of two macroblocks (vertically).
- // See libavcodec/utils.c:avcodec_align_dimensions2().
- size_t y_height = RoundUp(coded_size_.height(), kFrameSizeAlignment * 2);
- size_t uv_height = format_ == VideoFrame::YV12 ? y_height / 2 : y_height;
- size_t y_bytes = y_height * y_stride;
- size_t uv_bytes = uv_height * uv_stride;
-
- // The extra line of UV being allocated is because h264 chroma MC
- // overreads by one line in some cases, see libavcodec/utils.c:
- // avcodec_align_dimensions2() and libavcodec/x86/h264_chromamc.asm:
- // put_h264_chroma_mc4_ssse3().
- uint8* data = reinterpret_cast<uint8*>(
- base::AlignedAlloc(
- y_bytes + (uv_bytes * 2 + uv_stride) + kFrameSizePadding,
- kFrameAddressAlignment));
- no_longer_needed_cb_ = base::Bind(&ReleaseData, data);
- COMPILE_ASSERT(0 == VideoFrame::kYPlane, y_plane_data_must_be_index_0);
- data_[VideoFrame::kYPlane] = data;
- data_[VideoFrame::kUPlane] = data + y_bytes;
- data_[VideoFrame::kVPlane] = data + y_bytes + uv_bytes;
- strides_[VideoFrame::kYPlane] = y_stride;
- strides_[VideoFrame::kUPlane] = uv_stride;
- strides_[VideoFrame::kVPlane] = uv_stride;
-}
-
-VideoFrame::VideoFrame(VideoFrame::Format format,
- const gfx::Size& coded_size,
- const gfx::Rect& visible_rect,
- const gfx::Size& natural_size,
- base::TimeDelta timestamp)
- : format_(format),
- coded_size_(coded_size),
- visible_rect_(visible_rect),
- natural_size_(natural_size),
- texture_id_(0),
- texture_target_(0),
- timestamp_(timestamp) {
- memset(&strides_, 0, sizeof(strides_));
- memset(&data_, 0, sizeof(data_));
-}
-
-VideoFrame::~VideoFrame() {
- if (!no_longer_needed_cb_.is_null())
- base::ResetAndReturn(&no_longer_needed_cb_).Run();
-}
-
-bool VideoFrame::IsValidPlane(size_t plane) const {
- switch (format_) {
- case RGB32:
- return plane == kRGBPlane;
-
- case YV12:
- case YV16:
- return plane == kYPlane || plane == kUPlane || plane == kVPlane;
-
- case NATIVE_TEXTURE:
- NOTREACHED() << "NATIVE_TEXTUREs don't use plane-related methods!";
- return false;
-
- default:
- break;
- }
-
- // Intentionally leave out non-production formats.
- NOTREACHED() << "Unsupported video frame format: " << format_;
- return false;
-}
-
-int VideoFrame::stride(size_t plane) const {
- DCHECK(IsValidPlane(plane));
- return strides_[plane];
-}
-
-int VideoFrame::row_bytes(size_t plane) const {
- DCHECK(IsValidPlane(plane));
- int width = coded_size_.width();
- switch (format_) {
- // 32bpp.
- case RGB32:
- return width * 4;
-
- // Planar, 8bpp.
- case YV12:
- case YV16:
- if (plane == kYPlane)
- return width;
- return RoundUp(width, 2) / 2;
-
- default:
- break;
- }
-
- // Intentionally leave out non-production formats.
- NOTREACHED() << "Unsupported video frame format: " << format_;
- return 0;
-}
-
-int VideoFrame::rows(size_t plane) const {
- DCHECK(IsValidPlane(plane));
- int height = coded_size_.height();
- switch (format_) {
- case RGB32:
- case YV16:
- return height;
-
- case YV12:
- if (plane == kYPlane)
- return height;
- return RoundUp(height, 2) / 2;
-
- default:
- break;
- }
-
- // Intentionally leave out non-production formats.
- NOTREACHED() << "Unsupported video frame format: " << format_;
- return 0;
-}
-
-uint8* VideoFrame::data(size_t plane) const {
- DCHECK(IsValidPlane(plane));
- return data_[plane];
-}
-
-uintptr_t VideoFrame::texture_id() const {
- DCHECK_EQ(format_, NATIVE_TEXTURE);
- return texture_id_;
-}
-
-uint32 VideoFrame::texture_target() const {
- DCHECK_EQ(format_, NATIVE_TEXTURE);
- return texture_target_;
-}
-
-bool VideoFrame::IsEndOfStream() const {
- return format_ == VideoFrame::EMPTY;
-}
-
-void VideoFrame::HashFrameForTesting(base::MD5Context* context) {
- for (int plane = 0; plane < kMaxPlanes; ++plane) {
- if (!IsValidPlane(plane))
- break;
- for (int row = 0; row < rows(plane); ++row) {
- base::MD5Update(context, base::StringPiece(
- reinterpret_cast<char*>(data(plane) + stride(plane) * row),
- row_bytes(plane)));
- }
- }
-}
-
-} // namespace media
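AllocateYUV() above rounds strides to kFrameSizeAlignment, rounds the Y height up to twice that alignment for interlaced coding, and carves a single allocation into the Y/U/V planes. The sizing arithmetic, restated for a hypothetical 1280x720 YV12 frame:

// kFrameSizeAlignment = 16, kFrameSizePadding = 16 (see video_frame.h).
//
//   y_stride   = RoundUp(1280, 16)     = 1280
//   uv_stride  = RoundUp(1280 / 2, 16) = 640
//   y_height   = RoundUp(720, 16 * 2)  = 736  (x2 for interlaced coding)
//   uv_height  = y_height / 2          = 368  (YV12: 2x2 chroma subsampling)
//
//   allocation = y_height * y_stride            // 942,080 bytes of Y
//              + 2 * uv_height * uv_stride      // 471,040 bytes of U and V
//              + uv_stride + 16                 // chroma MC overread + padding
//              = 1,413,776 bytes, aligned to kFrameAddressAlignment (32).
size_t RoundUp(size_t value, size_t alignment) {  // |alignment|: power of two.
  return (value + alignment - 1) & ~(alignment - 1);
}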
diff --git a/src/media/base/video_frame.h b/src/media/base/video_frame.h
deleted file mode 100644
index 879feac..0000000
--- a/src/media/base/video_frame.h
+++ /dev/null
@@ -1,230 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef MEDIA_BASE_VIDEO_FRAME_H_
-#define MEDIA_BASE_VIDEO_FRAME_H_
-
-#include "base/callback.h"
-#include "base/md5.h"
-#include "media/base/buffers.h"
-#include "ui/gfx/rect.h"
-#include "ui/gfx/size.h"
-
-namespace media {
-
-class MEDIA_EXPORT VideoFrame : public base::RefCountedThreadSafe<VideoFrame> {
- public:
- enum {
- kFrameSizeAlignment = 16,
- kFrameSizePadding = 16,
- kFrameAddressAlignment = 32
- };
-
- enum {
- kMaxPlanes = 3,
-
- kRGBPlane = 0,
-
- kYPlane = 0,
- kUPlane = 1,
- kVPlane = 2,
- };
-
- // Surface formats roughly based on FOURCC labels, see:
- // http://www.fourcc.org/rgb.php
- // http://www.fourcc.org/yuv.php
- // Keep in sync with WebKit::WebVideoFrame!
- enum Format {
- INVALID = 0, // Invalid format value. Used for error reporting.
- RGB32 = 4, // 32bpp RGB packed with extra byte 8:8:8
- YV12 = 6, // 12bpp YVU planar 1x1 Y, 2x2 VU samples
- YV16 = 7, // 16bpp YVU planar 1x1 Y, 2x1 VU samples
- EMPTY = 9, // An empty frame.
- I420 = 11, // 12bpp YUV planar 1x1 Y, 2x2 UV samples.
- NATIVE_TEXTURE = 12, // Native texture. Pixel-format agnostic.
-#if defined(__LB_SHELL__) || defined(COBALT)
- PUNCH_OUT = 13, // Punch out frame.
-#endif
- };
-
- // Creates a new frame in system memory with given parameters. Buffers for
- // the frame are allocated but not initialized.
- // |coded_size| is the width and height of the frame data in pixels.
- // |visible_rect| is the visible portion of |coded_size|, after cropping (if
- // any) is applied.
- // |natural_size| is the width and height of the frame when the frame's aspect
- // ratio is applied to |visible_rect|.
- static scoped_refptr<VideoFrame> CreateFrame(
- Format format,
- const gfx::Size& coded_size,
- const gfx::Rect& visible_rect,
- const gfx::Size& natural_size,
- base::TimeDelta timestamp);
-
- // Call prior to CreateFrame to ensure validity of frame configuration. Called
- // automatically by VideoDecoderConfig::IsValidConfig().
- // TODO(scherkus): VideoDecoderConfig shouldn't call this method
- static bool IsValidConfig(Format format, const gfx::Size& coded_size,
- const gfx::Rect& visible_rect,
- const gfx::Size& natural_size);
-
- // CB to write pixels from the texture backing this frame into the
- // |void*| parameter.
- typedef base::Callback<void(void*)> ReadPixelsCB;
-
- // Wraps a native texture of the given parameters with a VideoFrame. When the
- // frame is destroyed |no_longer_needed_cb.Run()| will be called.
- // |coded_size| is the width and height of the frame data in pixels.
- // |visible_rect| is the visible portion of |coded_size|, after cropping (if
- // any) is applied.
- // |natural_size| is the width and height of the frame when the frame's aspect
- // ratio is applied to |visible_rect|.
- // |read_pixels_cb| may be used to do (slow!) readbacks from the
- // texture to main memory.
- static scoped_refptr<VideoFrame> WrapNativeTexture(
- uintptr_t texture_id,
- uint32 texture_target,
- const gfx::Size& coded_size,
- const gfx::Rect& visible_rect,
- const gfx::Size& natural_size,
- base::TimeDelta timestamp,
- const ReadPixelsCB& read_pixels_cb,
- const base::Closure& no_longer_needed_cb);
-
- // Read pixels from the native texture backing |*this| and write
- // them to |*pixels| as BGRA. |pixels| must point to a buffer at
- // least as large as 4*visible_rect().width()*visible_rect().height().
- void ReadPixelsFromNativeTexture(void* pixels);
-
- // Wraps external YUV data of the given parameters with a VideoFrame.
- // The returned VideoFrame does not own the data passed in. When the frame
- // is destroyed |no_longer_needed_cb.Run()| will be called.
- static scoped_refptr<VideoFrame> WrapExternalYuvData(
- Format format,
- const gfx::Size& coded_size,
- const gfx::Rect& visible_rect,
- const gfx::Size& natural_size,
- int32 y_stride,
- int32 u_stride,
- int32 v_stride,
- uint8* y_data,
- uint8* u_data,
- uint8* v_data,
- base::TimeDelta timestamp,
- const base::Closure& no_longer_needed_cb);
-
- // Creates a frame whose format is VideoFrame::EMPTY and whose width, height,
- // and timestamp are all 0.
- static scoped_refptr<VideoFrame> CreateEmptyFrame();
-
- // Allocates YV12 frame based on |size|, and sets its data to the YUV(y,u,v).
- static scoped_refptr<VideoFrame> CreateColorFrame(
- const gfx::Size& size,
- uint8 y, uint8 u, uint8 v,
- base::TimeDelta timestamp);
-
- // Allocates YV12 frame based on |size|, and sets its data to the YUV
- // equivalent of RGB(0,0,0).
- static scoped_refptr<VideoFrame> CreateBlackFrame(const gfx::Size& size);
-
-#if defined(__LB_SHELL__) || defined(COBALT)
- // Allocates a punch-out frame with all stats set to 0.
- // When rendered, the frame causes a hole to be punched out, discarding
- // everything that was rendered underneath it.
- static scoped_refptr<VideoFrame> CreatePunchOutFrame(const gfx::Size& size);
-#endif
-
- Format format() const { return format_; }
-
- const gfx::Size& coded_size() const { return coded_size_; }
- const gfx::Rect& visible_rect() const { return visible_rect_; }
- const gfx::Size& natural_size() const { return natural_size_; }
-
- int stride(size_t plane) const;
-
- // Returns the number of bytes per row and number of rows for a given plane.
- //
- // As opposed to stride(), row_bytes() refers to the bytes representing
- // frame data scanlines (coded_size.width() pixels, without stride padding).
- int row_bytes(size_t plane) const;
- int rows(size_t plane) const;
-
- // Returns pointer to the buffer for a given plane. The memory is owned by
- // VideoFrame object and must not be freed by the caller.
- uint8* data(size_t plane) const;
-
- // Returns the ID of the native texture wrapped by this frame. Only valid to
- // call if this is a NATIVE_TEXTURE frame.
- uintptr_t texture_id() const;
-
- // Returns the texture target. Only valid for NATIVE_TEXTURE frames.
- uint32 texture_target() const;
-
- // Returns true if this VideoFrame represents the end of the stream.
- bool IsEndOfStream() const;
-
- base::TimeDelta GetTimestamp() const {
- return timestamp_;
- }
- void SetTimestamp(const base::TimeDelta& timestamp) {
- timestamp_ = timestamp;
- }
-
- // Used to keep a running hash of seen frames. Expects an initialized MD5
- // context. Calls MD5Update with the context and the contents of the frame.
- void HashFrameForTesting(base::MD5Context* context);
-
- private:
- friend class base::RefCountedThreadSafe<VideoFrame>;
- // Clients must use the static CreateFrame() method to create a new frame.
- VideoFrame(Format format,
- const gfx::Size& coded_size,
- const gfx::Rect& visible_rect,
- const gfx::Size& natural_size,
- base::TimeDelta timestamp);
- virtual ~VideoFrame();
-
- // Used internally by CreateFrame().
- void AllocateRGB(size_t bytes_per_pixel);
- void AllocateYUV();
-
- // Used to DCHECK() plane parameters.
- bool IsValidPlane(size_t plane) const;
-
- // Frame format.
- Format format_;
-
- // Width and height of the video frame.
- gfx::Size coded_size_;
-
- // Width, height, and offsets of the visible portion of the video frame.
- gfx::Rect visible_rect_;
-
- // Width and height of the visible portion of the video frame with aspect
- // ratio taken into account.
- gfx::Size natural_size_;
-
- // Array of strides for each plane, typically greater or equal to the width
- // of the surface divided by the horizontal sampling period. Note that
- // strides can be negative.
- int32 strides_[kMaxPlanes];
-
- // Array of data pointers to each plane.
- uint8* data_[kMaxPlanes];
-
- // Native texture ID, if this is a NATIVE_TEXTURE frame.
- uintptr_t texture_id_;
- uint32 texture_target_;
- ReadPixelsCB read_pixels_cb_;
-
- base::Closure no_longer_needed_cb_;
-
- base::TimeDelta timestamp_;
-
- DISALLOW_IMPLICIT_CONSTRUCTORS(VideoFrame);
-};
-
-} // namespace media
-
-#endif // MEDIA_BASE_VIDEO_FRAME_H_
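WrapExternalYuvData() above adopts caller-owned planes without copying and only runs |no_longer_needed_cb| at destruction. A hedged wrapper sketch for adopting a decoder-owned YV12 buffer; the function, its parameters, and the release closure are assumptions about the caller, not part of the removed API:

#include "media/base/video_frame.h"

// Wraps decoder-owned YV12 planes in a VideoFrame without copying.
// |release_cb| runs when the frame is destroyed, e.g. to recycle the buffer.
scoped_refptr<media::VideoFrame> WrapDecoderOutput(
    uint8* y, uint8* u, uint8* v,
    int y_stride, int uv_stride,
    const gfx::Size& coded_size,
    base::TimeDelta timestamp,
    const base::Closure& release_cb) {
  return media::VideoFrame::WrapExternalYuvData(
      media::VideoFrame::YV12, coded_size,
      gfx::Rect(coded_size),  // whole frame is visible
      coded_size,             // natural size == coded size (square pixels)
      y_stride, uv_stride, uv_stride,
      y, u, v,
      timestamp, release_cb);
}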
diff --git a/src/media/base/video_frame_unittest.cc b/src/media/base/video_frame_unittest.cc
deleted file mode 100644
index 18cc1d3..0000000
--- a/src/media/base/video_frame_unittest.cc
+++ /dev/null
@@ -1,212 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "media/base/video_frame.h"
-
-#include "base/format_macros.h"
-#include "base/memory/scoped_ptr.h"
-#include "base/stringprintf.h"
-#include "media/base/buffers.h"
-#include "media/base/yuv_convert.h"
-#include "testing/gtest/include/gtest/gtest.h"
-
-namespace media {
-
-using base::MD5DigestToBase16;
-
-// Helper function that initializes a YV12 frame with white and black scan
-// lines based on the |white_to_black| parameter. If 0, then the entire
-// frame will be black; if 1, then the entire frame will be white.
-void InitializeYV12Frame(VideoFrame* frame, double white_to_black) {
- EXPECT_EQ(VideoFrame::YV12, frame->format());
- int first_black_row = static_cast<int>(frame->coded_size().height() *
- white_to_black);
- uint8* y_plane = frame->data(VideoFrame::kYPlane);
- for (int row = 0; row < frame->coded_size().height(); ++row) {
- int color = (row < first_black_row) ? 0xFF : 0x00;
- memset(y_plane, color, frame->stride(VideoFrame::kYPlane));
- y_plane += frame->stride(VideoFrame::kYPlane);
- }
- uint8* u_plane = frame->data(VideoFrame::kUPlane);
- uint8* v_plane = frame->data(VideoFrame::kVPlane);
- for (int row = 0; row < frame->coded_size().height(); row += 2) {
- memset(u_plane, 0x80, frame->stride(VideoFrame::kUPlane));
- memset(v_plane, 0x80, frame->stride(VideoFrame::kVPlane));
- u_plane += frame->stride(VideoFrame::kUPlane);
- v_plane += frame->stride(VideoFrame::kVPlane);
- }
-}
-
-// Given a |yv12_frame| this method converts the YV12 frame to RGBA and
-// makes sure that all the pixels of the RGB frame equal |expect_rgb_color|.
-void ExpectFrameColor(media::VideoFrame* yv12_frame, uint32 expect_rgb_color) {
- ASSERT_EQ(VideoFrame::YV12, yv12_frame->format());
- ASSERT_EQ(yv12_frame->stride(VideoFrame::kUPlane),
- yv12_frame->stride(VideoFrame::kVPlane));
-
- scoped_refptr<media::VideoFrame> rgb_frame;
- rgb_frame = media::VideoFrame::CreateFrame(VideoFrame::RGB32,
- yv12_frame->coded_size(),
- yv12_frame->visible_rect(),
- yv12_frame->natural_size(),
- yv12_frame->GetTimestamp());
-
- ASSERT_EQ(yv12_frame->coded_size().width(),
- rgb_frame->coded_size().width());
- ASSERT_EQ(yv12_frame->coded_size().height(),
- rgb_frame->coded_size().height());
-
- media::ConvertYUVToRGB32(yv12_frame->data(VideoFrame::kYPlane),
- yv12_frame->data(VideoFrame::kUPlane),
- yv12_frame->data(VideoFrame::kVPlane),
- rgb_frame->data(VideoFrame::kRGBPlane),
- rgb_frame->coded_size().width(),
- rgb_frame->coded_size().height(),
- yv12_frame->stride(VideoFrame::kYPlane),
- yv12_frame->stride(VideoFrame::kUPlane),
- rgb_frame->stride(VideoFrame::kRGBPlane),
- media::YV12);
-
- for (int row = 0; row < rgb_frame->coded_size().height(); ++row) {
- uint32* rgb_row_data = reinterpret_cast<uint32*>(
- rgb_frame->data(VideoFrame::kRGBPlane) +
- (rgb_frame->stride(VideoFrame::kRGBPlane) * row));
- for (int col = 0; col < rgb_frame->coded_size().width(); ++col) {
- SCOPED_TRACE(
- base::StringPrintf("Checking (%d, %d)", row, col));
- EXPECT_EQ(expect_rgb_color, rgb_row_data[col]);
- }
- }
-}
-
-// Fill each plane to its reported extents and verify accessors report
-// non-zero values. Additionally, for the first plane verify the rows and
-// row_bytes values are correct.
-void ExpectFrameExtents(VideoFrame::Format format, int planes,
- int bytes_per_pixel, const char* expected_hash) {
- const unsigned char kFillByte = 0x80;
- const int kWidth = 61;
- const int kHeight = 31;
- const base::TimeDelta kTimestamp = base::TimeDelta::FromMicroseconds(1337);
-
- gfx::Size size(kWidth, kHeight);
- scoped_refptr<VideoFrame> frame = VideoFrame::CreateFrame(
- format, size, gfx::Rect(size), size, kTimestamp);
- ASSERT_TRUE(frame);
-
- for(int plane = 0; plane < planes; plane++) {
- SCOPED_TRACE(base::StringPrintf("Checking plane %d", plane));
- EXPECT_TRUE(frame->data(plane));
- EXPECT_TRUE(frame->stride(plane));
- EXPECT_TRUE(frame->rows(plane));
- EXPECT_TRUE(frame->row_bytes(plane));
-
- if (plane == 0) {
- EXPECT_EQ(frame->rows(plane), kHeight);
- EXPECT_EQ(frame->row_bytes(plane), kWidth * bytes_per_pixel);
- }
-
- memset(frame->data(plane), kFillByte,
- frame->stride(plane) * frame->rows(plane));
- }
-
- base::MD5Context context;
- base::MD5Init(&context);
- frame->HashFrameForTesting(&context);
- base::MD5Digest digest;
- base::MD5Final(&digest, &context);
- EXPECT_EQ(MD5DigestToBase16(digest), expected_hash);
-}
-
-TEST(VideoFrame, CreateFrame) {
- const int kWidth = 64;
- const int kHeight = 48;
- const base::TimeDelta kTimestamp = base::TimeDelta::FromMicroseconds(1337);
-
- // Create a YV12 Video Frame.
- gfx::Size size(kWidth, kHeight);
- scoped_refptr<media::VideoFrame> frame =
- VideoFrame::CreateFrame(media::VideoFrame::YV12, size, gfx::Rect(size),
- size, kTimestamp);
- ASSERT_TRUE(frame);
-
- // Test VideoFrame implementation.
- EXPECT_EQ(media::VideoFrame::YV12, frame->format());
- {
- SCOPED_TRACE("");
- InitializeYV12Frame(frame, 0.0f);
- ExpectFrameColor(frame, 0xFF000000);
- }
- base::MD5Digest digest;
- base::MD5Context context;
- base::MD5Init(&context);
- frame->HashFrameForTesting(&context);
- base::MD5Final(&digest, &context);
- EXPECT_EQ(MD5DigestToBase16(digest), "9065c841d9fca49186ef8b4ef547e79b");
- {
- SCOPED_TRACE("");
- InitializeYV12Frame(frame, 1.0f);
- ExpectFrameColor(frame, 0xFFFFFFFF);
- }
- base::MD5Init(&context);
- frame->HashFrameForTesting(&context);
- base::MD5Final(&digest, &context);
- EXPECT_EQ(MD5DigestToBase16(digest), "911991d51438ad2e1a40ed5f6fc7c796");
-
- // Test an empty frame.
- frame = VideoFrame::CreateEmptyFrame();
- EXPECT_TRUE(frame->IsEndOfStream());
-}
-
-TEST(VideoFrame, CreateBlackFrame) {
- const int kWidth = 2;
- const int kHeight = 2;
- const uint8 kExpectedYRow[] = { 0, 0 };
- const uint8 kExpectedUVRow[] = { 128 };
-
- scoped_refptr<media::VideoFrame> frame =
- VideoFrame::CreateBlackFrame(gfx::Size(kWidth, kHeight));
- ASSERT_TRUE(frame);
-
- // Test basic properties.
- EXPECT_EQ(0, frame->GetTimestamp().InMicroseconds());
- EXPECT_FALSE(frame->IsEndOfStream());
-
- // Test |frame| properties.
- EXPECT_EQ(VideoFrame::YV12, frame->format());
- EXPECT_EQ(kWidth, frame->coded_size().width());
- EXPECT_EQ(kHeight, frame->coded_size().height());
-
- // Test frames themselves.
- uint8* y_plane = frame->data(VideoFrame::kYPlane);
- for (int y = 0; y < frame->coded_size().height(); ++y) {
- EXPECT_EQ(0, memcmp(kExpectedYRow, y_plane, arraysize(kExpectedYRow)));
- y_plane += frame->stride(VideoFrame::kYPlane);
- }
-
- uint8* u_plane = frame->data(VideoFrame::kUPlane);
- uint8* v_plane = frame->data(VideoFrame::kVPlane);
- for (int y = 0; y < frame->coded_size().height() / 2; ++y) {
- EXPECT_EQ(0, memcmp(kExpectedUVRow, u_plane, arraysize(kExpectedUVRow)));
- EXPECT_EQ(0, memcmp(kExpectedUVRow, v_plane, arraysize(kExpectedUVRow)));
- u_plane += frame->stride(VideoFrame::kUPlane);
- v_plane += frame->stride(VideoFrame::kVPlane);
- }
-}
-
- // Ensure each frame is properly sized and allocated. Otherwise this will
- // trigger OOB reads and writes as well as incorrect frame hashes.
-TEST(VideoFrame, CheckFrameExtents) {
- // Each call consists of a VideoFrame::Format, # of planes, bytes per pixel,
- // and the expected hash of all planes if filled with kFillByte (defined in
- // ExpectFrameExtents).
- ExpectFrameExtents(
- VideoFrame::RGB32, 1, 4, "de6d3d567e282f6a38d478f04fc81fb0");
- ExpectFrameExtents(
- VideoFrame::YV12, 3, 1, "71113bdfd4c0de6cf62f48fb74f7a0b1");
- ExpectFrameExtents(
- VideoFrame::YV16, 3, 1, "9bb99ac3ff350644ebff4d28dc01b461");
-}
-
-} // namespace media
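For reference, the MD5 comparisons in the tests above all follow the same pattern; a condensed helper restating those calls (assuming base/md5.h and VideoFrame::HashFrameForTesting, both already used above) would look like this:

std::string HashOfFrame(const scoped_refptr<media::VideoFrame>& frame) {
  // HashFrameForTesting() feeds every plane's bytes into the MD5 context.
  base::MD5Context context;
  base::MD5Init(&context);
  frame->HashFrameForTesting(&context);
  base::MD5Digest digest;
  base::MD5Final(&digest, &context);
  return base::MD5DigestToBase16(digest);
}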
diff --git a/src/media/base/video_renderer.cc b/src/media/base/video_renderer.cc
deleted file mode 100644
index 00a8f21..0000000
--- a/src/media/base/video_renderer.cc
+++ /dev/null
@@ -1,12 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "media/base/video_renderer.h"
-
-namespace media {
-
-VideoRenderer::VideoRenderer() {}
-VideoRenderer::~VideoRenderer() {}
-
-} // namespace media
diff --git a/src/media/base/video_renderer.h b/src/media/base/video_renderer.h
deleted file mode 100644
index d2d302f..0000000
--- a/src/media/base/video_renderer.h
+++ /dev/null
@@ -1,103 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef MEDIA_BASE_VIDEO_RENDERER_H_
-#define MEDIA_BASE_VIDEO_RENDERER_H_
-
-#include <list>
-
-#include "base/callback.h"
-#include "base/memory/ref_counted.h"
-#include "base/time.h"
-#include "media/base/media_export.h"
-#include "media/base/pipeline_status.h"
-#include "ui/gfx/size.h"
-
-namespace media {
-
-class DemuxerStream;
-class VideoDecoder;
-
-class MEDIA_EXPORT VideoRenderer
- : public base::RefCountedThreadSafe<VideoRenderer> {
- public:
- typedef std::list<scoped_refptr<VideoDecoder> > VideoDecoderList;
-
- // Used to update the pipeline's clock time. The parameter is the time that
- // the clock should not exceed.
- typedef base::Callback<void(base::TimeDelta)> TimeCB;
-
- // Executed when the natural size of the video has changed.
- typedef base::Callback<void(const gfx::Size& size)> NaturalSizeChangedCB;
-
- // Used to query the current time or duration of the media.
- typedef base::Callback<base::TimeDelta()> TimeDeltaCB;
-
- // Initialize a VideoRenderer with the given DemuxerStream and
- // VideoDecoderList, executing |init_cb| callback upon completion.
- //
- // |statistics_cb| is executed periodically with video rendering stats, such
- // as dropped frames.
- //
- // |time_cb| is executed whenever time has advanced by way of video rendering.
- //
- // |size_changed_cb| is executed whenever the dimensions of the video have
- // changed.
- //
- // |ended_cb| is executed when video rendering has reached the end of stream.
- //
- // |error_cb| is executed if an error was encountered.
- //
- // |get_time_cb| is used to query the current media playback time.
- //
- // |get_duration_cb| is used to query the media duration.
- virtual void Initialize(const scoped_refptr<DemuxerStream>& stream,
- const VideoDecoderList& decoders,
- const PipelineStatusCB& init_cb,
- const StatisticsCB& statistics_cb,
- const TimeCB& time_cb,
- const NaturalSizeChangedCB& size_changed_cb,
- const base::Closure& ended_cb,
- const PipelineStatusCB& error_cb,
- const TimeDeltaCB& get_time_cb,
- const TimeDeltaCB& get_duration_cb) = 0;
-
- // Start video decoding and rendering at the current playback rate, executing
- // |callback| when playback is underway.
- virtual void Play(const base::Closure& callback) = 0;
-
- // Temporarily suspend decoding and rendering video, executing |callback| when
- // playback has been suspended.
- virtual void Pause(const base::Closure& callback) = 0;
-
- // Discard any video data, executing |callback| when completed.
- virtual void Flush(const base::Closure& callback) = 0;
-
- // Start prerolling video data for samples starting at |time|, executing
- // |callback| when completed.
- //
- // Only valid to call after a successful Initialize() or Flush().
- virtual void Preroll(base::TimeDelta time,
- const PipelineStatusCB& callback) = 0;
-
- // Stop all operations in preparation for being deleted, executing |callback|
- // when complete.
- virtual void Stop(const base::Closure& callback) = 0;
-
- // Updates the current playback rate.
- virtual void SetPlaybackRate(float playback_rate) = 0;
-
- protected:
- friend class base::RefCountedThreadSafe<VideoRenderer>;
-
- VideoRenderer();
- virtual ~VideoRenderer();
-
- private:
- DISALLOW_COPY_AND_ASSIGN(VideoRenderer);
-};
-
-} // namespace media
-
-#endif // MEDIA_BASE_VIDEO_RENDERER_H_
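A minimal sketch of how a client might build the callback types declared above, using base::Bind as elsewhere in this code; MyPipeline, its methods, and |pipeline| are hypothetical names used only for illustration:

// Hypothetical client object; the callback wiring is the point here.
media::VideoRenderer::TimeCB time_cb =
    base::Bind(&MyPipeline::OnVideoTimeUpdate, base::Unretained(pipeline));
media::VideoRenderer::NaturalSizeChangedCB size_changed_cb =
    base::Bind(&MyPipeline::OnNaturalSizeChanged, base::Unretained(pipeline));
media::VideoRenderer::TimeDeltaCB get_duration_cb =
    base::Bind(&MyPipeline::GetMediaDuration, base::Unretained(pipeline));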
diff --git a/src/media/base/video_resolution.h b/src/media/base/video_resolution.h
deleted file mode 100644
index 29ae65f..0000000
--- a/src/media/base/video_resolution.h
+++ /dev/null
@@ -1,70 +0,0 @@
-/*
- * Copyright 2016 Google Inc. All Rights Reserved.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef MEDIA_BASE_VIDEO_RESOLUTION_H_
-#define MEDIA_BASE_VIDEO_RESOLUTION_H_
-
-#include "base/logging.h"
-#include "media/base/media_export.h"
-#include "ui/gfx/size.h"
-
-namespace media {
-
- // Enumerates the various representations of the resolution of videos. Note
- // that, except for |kVideoResolutionInvalid|, the values are guaranteed to be
- // in the same order as their (width, height) pairs. Also note that, unlike
- // the other valid resolution levels, |kVideoResolutionHighRes| is not a 16:9
- // resolution.
-enum VideoResolution {
- kVideoResolution1080p, // 1920 x 1080
- kVideoResolution2k, // 2560 x 1440
- kVideoResolution4k, // 3840 x 2160
- kVideoResolution5k, // 5120 x 2880
- kVideoResolution8k, // 7680 x 4320
- kVideoResolutionHighRes, // 8192 x 8192
- kVideoResolutionInvalid
-};
-
-inline VideoResolution GetVideoResolution(int width, int height) {
- if (width <= 1920 && height <= 1080) {
- return kVideoResolution1080p;
- }
- if (width <= 2560 && height <= 1440) {
- return kVideoResolution2k;
- }
- if (width <= 3840 && height <= 2160) {
- return kVideoResolution4k;
- }
- if (width <= 5120 && height <= 2880) {
- return kVideoResolution5k;
- }
- if (width <= 7680 && height <= 4320) {
- return kVideoResolution8k;
- }
- if (width <= 8192 && height <= 8192) {
- return kVideoResolutionHighRes;
- }
- DLOG(FATAL) << "Invalid VideoResolution: width: " << width
- << " height: " << height;
- return kVideoResolutionInvalid;
-}
-
-inline VideoResolution GetVideoResolution(const gfx::Size& size) {
- return GetVideoResolution(size.width(), size.height());
-}
-
-} // namespace media
-
-#endif // MEDIA_BASE_VIDEO_RESOLUTION_H_
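A usage sketch for GetVideoResolution() as defined above; the dimensions are arbitrary example values:

// 1920x800 (a letterboxed film) still lands in the 1080p bucket because both
// dimensions fit within 1920x1080.
DCHECK_EQ(media::kVideoResolution1080p, media::GetVideoResolution(1920, 800));
// 3840x2160 maps to 4k, while 4096x2160 ("DCI 4K") is wider than 3840 and
// therefore falls through to the next bucket that can hold it, 5k.
DCHECK_EQ(media::kVideoResolution4k, media::GetVideoResolution(3840, 2160));
DCHECK_EQ(media::kVideoResolution5k, media::GetVideoResolution(4096, 2160));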
diff --git a/src/media/base/video_types.h b/src/media/base/video_types.h
deleted file mode 100644
index f340711..0000000
--- a/src/media/base/video_types.h
+++ /dev/null
@@ -1,95 +0,0 @@
-// Copyright 2015 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef MEDIA_BASE_VIDEO_TYPES_H_
-#define MEDIA_BASE_VIDEO_TYPES_H_
-
-#include <string>
-
-#include "build/build_config.h"
-#include "media/base/media_export.h"
-
-namespace media {
-
-// Pixel formats roughly based on FOURCC labels, see:
-// http://www.fourcc.org/rgb.php and http://www.fourcc.org/yuv.php
-// Logged to UMA, so never reuse values. Leave gaps if necessary.
-// Ordered as planar, semi-planar, YUV-packed, and RGB formats.
-enum VideoPixelFormat {
- PIXEL_FORMAT_UNKNOWN = 0, // Unknown or unspecified format value.
- PIXEL_FORMAT_I420 =
- 1, // 12bpp YUV planar 1x1 Y, 2x2 UV samples, a.k.a. YU12.
- PIXEL_FORMAT_YV12 = 2, // 12bpp YVU planar 1x1 Y, 2x2 VU samples.
- PIXEL_FORMAT_YV16 = 3, // 16bpp YVU planar 1x1 Y, 2x1 VU samples.
- PIXEL_FORMAT_YV12A = 4, // 20bpp YUVA planar 1x1 Y, 2x2 VU, 1x1 A samples.
- PIXEL_FORMAT_YV24 = 5, // 24bpp YUV planar, no subsampling.
- PIXEL_FORMAT_NV12 =
- 6, // 12bpp with Y plane followed by a 2x2 interleaved UV plane.
- PIXEL_FORMAT_NV21 =
- 7, // 12bpp with Y plane followed by a 2x2 interleaved VU plane.
- PIXEL_FORMAT_UYVY =
- 8, // 16bpp interleaved 2x1 U, 1x1 Y, 2x1 V, 1x1 Y samples.
- PIXEL_FORMAT_YUY2 =
- 9, // 16bpp interleaved 1x1 Y, 2x1 U, 1x1 Y, 2x1 V samples.
- PIXEL_FORMAT_ARGB = 10, // 32bpp ARGB, 1 plane.
- PIXEL_FORMAT_XRGB = 11, // 24bpp XRGB, 1 plane.
- PIXEL_FORMAT_RGB24 = 12, // 24bpp BGR, 1 plane.
- PIXEL_FORMAT_RGB32 = 13, // 32bpp BGRA, 1 plane.
- PIXEL_FORMAT_MJPEG = 14, // MJPEG compressed.
- // MediaTek proprietary format. MT21 is similar to NV21 except the memory
- // layout and pixel layout (swizzles). 12bpp with Y plane followed by a 2x2
- // interleaved VU plane. Each image contains two buffers -- Y plane and VU
- // plane. Two planes can be non-contiguous in memory. The starting addresses
- // of Y plane and VU plane are 4KB aligned.
- // Suppose image dimension is (width, height). For both Y plane and VU plane:
- // Row pitch = ((width+15)/16) * 16.
- // Plane size = Row pitch * (((height+31)/32)*32)
- PIXEL_FORMAT_MT21 = 15,
-
- PIXEL_FORMAT_YUV420P9 = 16,
- PIXEL_FORMAT_YUV420P10 = 17,
- PIXEL_FORMAT_YUV422P9 = 18,
- PIXEL_FORMAT_YUV422P10 = 19,
- PIXEL_FORMAT_YUV444P9 = 20,
- PIXEL_FORMAT_YUV444P10 = 21,
-
- PIXEL_FORMAT_YUV420P12 = 22,
- PIXEL_FORMAT_YUV422P12 = 23,
- PIXEL_FORMAT_YUV444P12 = 24,
-
- PIXEL_FORMAT_Y8 = 25, // single 8bpp plane.
- PIXEL_FORMAT_Y16 = 26, // single 16bpp plane.
-
- PIXEL_FORMAT_I422 =
- 27, // 16bpp YUV planar 1x1 Y, 2x1 UV samples, a.k.a. YU16.
-
- // Please update UMA histogram enumeration when adding new formats here.
- PIXEL_FORMAT_MAX =
- PIXEL_FORMAT_I422, // Must always be equal to largest entry logged.
-};
-
-// Color space or color range used for the pixels.
-// Logged to UMA, so never reuse values. Leave gaps if necessary.
-enum ColorSpace {
- COLOR_SPACE_UNSPECIFIED = 0, // In general this is Rec601.
- // The JPEG color space is the combination of Rec.601 and full range colors
- // (aka pc range colors).
- COLOR_SPACE_JPEG = 1,
- COLOR_SPACE_HD_REC709 = 2, // Rec709 "HD" color space.
- COLOR_SPACE_SD_REC601 = 3, // Rec601 "SD" color space.
- COLOR_SPACE_MAX = COLOR_SPACE_SD_REC601,
-};
-
- // Returns the name of a VideoPixelFormat as a string.
-MEDIA_EXPORT std::string VideoPixelFormatToString(VideoPixelFormat format);
-
-// Returns true if |format| is a YUV format with multiple planes.
-MEDIA_EXPORT bool IsYuvPlanar(VideoPixelFormat format);
-
-// Returns true if |format| has no Alpha channel (hence is always opaque).
-MEDIA_EXPORT bool IsOpaque(VideoPixelFormat format);
-
-} // namespace media
-
-#endif // MEDIA_BASE_VIDEO_TYPES_H_
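The helpers declared above are implemented in video_types.cc, which does not appear in this file; the following is only a plausible sketch of IsYuvPlanar() inferred from the enum comments, not the actual implementation:

bool IsYuvPlanarSketch(media::VideoPixelFormat format) {
  switch (format) {
    // Planar and semi-planar YUV layouts: separate Y / U / V planes, or a Y
    // plane followed by an interleaved chroma plane.
    case media::PIXEL_FORMAT_I420:
    case media::PIXEL_FORMAT_YV12:
    case media::PIXEL_FORMAT_YV16:
    case media::PIXEL_FORMAT_YV12A:
    case media::PIXEL_FORMAT_YV24:
    case media::PIXEL_FORMAT_NV12:
    case media::PIXEL_FORMAT_NV21:
    case media::PIXEL_FORMAT_MT21:
    case media::PIXEL_FORMAT_YUV420P9:
    case media::PIXEL_FORMAT_YUV420P10:
    case media::PIXEL_FORMAT_YUV420P12:
    case media::PIXEL_FORMAT_YUV422P9:
    case media::PIXEL_FORMAT_YUV422P10:
    case media::PIXEL_FORMAT_YUV422P12:
    case media::PIXEL_FORMAT_YUV444P9:
    case media::PIXEL_FORMAT_YUV444P10:
    case media::PIXEL_FORMAT_YUV444P12:
    case media::PIXEL_FORMAT_I422:
      return true;
    default:
      // Packed YUV (UYVY, YUY2), RGB, MJPEG, and single-plane Y formats.
      return false;
  }
}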
diff --git a/src/media/base/video_util.cc b/src/media/base/video_util.cc
deleted file mode 100644
index 4bef0ad..0000000
--- a/src/media/base/video_util.cc
+++ /dev/null
@@ -1,88 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "media/base/video_util.h"
-
-#include <cmath>
-
-#include "base/logging.h"
-#include "media/base/video_frame.h"
-
-namespace media {
-
-gfx::Size GetNaturalSize(const gfx::Size& visible_size,
- int aspect_ratio_numerator,
- int aspect_ratio_denominator) {
- if (aspect_ratio_denominator == 0 ||
- aspect_ratio_numerator < 0 ||
- aspect_ratio_denominator < 0)
- return gfx::Size();
-
- double aspect_ratio = aspect_ratio_numerator /
- static_cast<double>(aspect_ratio_denominator);
-
- int width = floor(visible_size.width() * aspect_ratio + 0.5);
- int height = visible_size.height();
-
- // An even width makes things easier for YV12 and appears to be the behavior
- // expected by WebKit layout tests.
- return gfx::Size(width & ~1, height);
-}
-
-static void CopyPlane(size_t plane, const uint8* source, int stride, int rows,
- VideoFrame* frame) {
- uint8* dest = frame->data(plane);
- int dest_stride = frame->stride(plane);
-
- // Clamp in case source frame has smaller stride.
- int bytes_to_copy_per_row = std::min(frame->row_bytes(plane), stride);
-
- // Clamp in case source frame has smaller height.
- int rows_to_copy = std::min(frame->rows(plane), rows);
-
- // Copy!
- for (int row = 0; row < rows_to_copy; ++row) {
- memcpy(dest, source, bytes_to_copy_per_row);
- source += stride;
- dest += dest_stride;
- }
-}
-
-void CopyYPlane(const uint8* source, int stride, int rows, VideoFrame* frame) {
- CopyPlane(VideoFrame::kYPlane, source, stride, rows, frame);
-}
-
-void CopyUPlane(const uint8* source, int stride, int rows, VideoFrame* frame) {
- CopyPlane(VideoFrame::kUPlane, source, stride, rows, frame);
-}
-
-void CopyVPlane(const uint8* source, int stride, int rows, VideoFrame* frame) {
- CopyPlane(VideoFrame::kVPlane, source, stride, rows, frame);
-}
-
-void FillYUV(VideoFrame* frame, uint8 y, uint8 u, uint8 v) {
- // Fill the Y plane.
- uint8* y_plane = frame->data(VideoFrame::kYPlane);
- int y_rows = frame->rows(VideoFrame::kYPlane);
- int y_row_bytes = frame->row_bytes(VideoFrame::kYPlane);
- for (int i = 0; i < y_rows; ++i) {
- memset(y_plane, y, y_row_bytes);
- y_plane += frame->stride(VideoFrame::kYPlane);
- }
-
- // Fill the U and V planes.
- uint8* u_plane = frame->data(VideoFrame::kUPlane);
- uint8* v_plane = frame->data(VideoFrame::kVPlane);
- int uv_rows = frame->rows(VideoFrame::kUPlane);
- int u_row_bytes = frame->row_bytes(VideoFrame::kUPlane);
- int v_row_bytes = frame->row_bytes(VideoFrame::kVPlane);
- for (int i = 0; i < uv_rows; ++i) {
- memset(u_plane, u, u_row_bytes);
- memset(v_plane, v, v_row_bytes);
- u_plane += frame->stride(VideoFrame::kUPlane);
- v_plane += frame->stride(VideoFrame::kVPlane);
- }
-}
-
-} // namespace media
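A worked example of GetNaturalSize() above: anamorphic NTSC content with a 704x480 visible area and a 10:11 pixel aspect ratio yields a 640x480 natural size; the values are illustrative.

// width  = floor(704 * 10/11 + 0.5) = 640, then masked down to even -> 640
// height = 480 (unchanged)
gfx::Size natural = media::GetNaturalSize(gfx::Size(704, 480), 10, 11);
DCHECK_EQ(640, natural.width());
DCHECK_EQ(480, natural.height());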
diff --git a/src/media/base/video_util.h b/src/media/base/video_util.h
deleted file mode 100644
index 562ad7d..0000000
--- a/src/media/base/video_util.h
+++ /dev/null
@@ -1,37 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef MEDIA_BASE_VIDEO_UTIL_H_
-#define MEDIA_BASE_VIDEO_UTIL_H_
-
-#include "base/basictypes.h"
-#include "media/base/media_export.h"
-#include "ui/gfx/size.h"
-
-namespace media {
-
-class VideoFrame;
-
-// Computes the size of |visible_size| for a given aspect ratio.
-MEDIA_EXPORT gfx::Size GetNaturalSize(const gfx::Size& visible_size,
- int aspect_ratio_numerator,
- int aspect_ratio_denominator);
-
-// Copies a plane of YUV source into a VideoFrame object, taking into account
-// source and destinations dimensions.
-//
-// NOTE: rows is *not* the same as height!
-MEDIA_EXPORT void CopyYPlane(const uint8* source, int stride, int rows,
- VideoFrame* frame);
-MEDIA_EXPORT void CopyUPlane(const uint8* source, int stride, int rows,
- VideoFrame* frame);
-MEDIA_EXPORT void CopyVPlane(const uint8* source, int stride, int rows,
- VideoFrame* frame);
-
-// Fills |frame| containing YUV data to the given color values.
-MEDIA_EXPORT void FillYUV(VideoFrame* frame, uint8 y, uint8 u, uint8 v);
-
-} // namespace media
-
-#endif // MEDIA_BASE_VIDEO_UTIL_H_
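A usage sketch of FillYUV(): clearing a YV12 frame to black uses Y=0 with both chroma planes at 128, the same values the CreateBlackFrame test above expects; the frame size is an arbitrary example.

gfx::Size size(640, 360);
scoped_refptr<media::VideoFrame> frame = media::VideoFrame::CreateFrame(
    media::VideoFrame::YV12, size, gfx::Rect(size), size, base::TimeDelta());
// Black in YUV: luma at 0x00, both chroma planes at the 0x80 midpoint.
media::FillYUV(frame.get(), 0x00, 0x80, 0x80);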
diff --git a/src/media/base/video_util_unittest.cc b/src/media/base/video_util_unittest.cc
deleted file mode 100644
index d4f2e29..0000000
--- a/src/media/base/video_util_unittest.cc
+++ /dev/null
@@ -1,85 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/memory/scoped_ptr.h"
-#include "media/base/video_frame.h"
-#include "media/base/video_util.h"
-#include "testing/gtest/include/gtest/gtest.h"
-
-namespace media {
-
-class VideoUtilTest : public testing::Test {
- public:
- VideoUtilTest()
- : height_(0),
- y_stride_(0),
- u_stride_(0),
- v_stride_(0) {
- }
-
- virtual ~VideoUtilTest() {}
-
- void CreateSourceFrame(int width, int height,
- int y_stride, int u_stride, int v_stride) {
- EXPECT_GE(y_stride, width);
- EXPECT_GE(u_stride, width / 2);
- EXPECT_GE(v_stride, width / 2);
-
- height_ = height;
- y_stride_ = y_stride;
- u_stride_ = u_stride;
- v_stride_ = v_stride;
-
- y_plane_.reset(new uint8[y_stride * height]);
- u_plane_.reset(new uint8[u_stride * height / 2]);
- v_plane_.reset(new uint8[v_stride * height / 2]);
- }
-
- void CreateDestinationFrame(int width, int height) {
- gfx::Size size(width, height);
- destination_frame_ =
- VideoFrame::CreateFrame(VideoFrame::YV12, size, gfx::Rect(size), size,
- base::TimeDelta());
- }
-
- void CopyPlanes() {
- CopyYPlane(y_plane_.get(), y_stride_, height_, destination_frame_);
- CopyUPlane(u_plane_.get(), u_stride_, height_ / 2, destination_frame_);
- CopyVPlane(v_plane_.get(), v_stride_, height_ / 2, destination_frame_);
- }
-
- private:
- scoped_array<uint8> y_plane_;
- scoped_array<uint8> u_plane_;
- scoped_array<uint8> v_plane_;
-
- int height_;
- int y_stride_;
- int u_stride_;
- int v_stride_;
-
- scoped_refptr<VideoFrame> destination_frame_;
-
- DISALLOW_COPY_AND_ASSIGN(VideoUtilTest);
-};
-
-TEST_F(VideoUtilTest, CopyPlane_Exact) {
- CreateSourceFrame(16, 16, 16, 8, 8);
- CreateDestinationFrame(16, 16);
- CopyPlanes();
-}
-
-TEST_F(VideoUtilTest, CopyPlane_SmallerSource) {
- CreateSourceFrame(8, 8, 8, 4, 4);
- CreateDestinationFrame(16, 16);
- CopyPlanes();
-}
-
-TEST_F(VideoUtilTest, CopyPlane_SmallerDestination) {
- CreateSourceFrame(16, 16, 16, 8, 8);
- CreateDestinationFrame(8, 8);
- CopyPlanes();
-}
-
-} // namespace media
diff --git a/src/media/base/yuv_convert.cc b/src/media/base/yuv_convert.cc
deleted file mode 100644
index 4969a5c..0000000
--- a/src/media/base/yuv_convert.cc
+++ /dev/null
@@ -1,594 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
- // This webpage shows the layout of YV12 and other YUV formats
-// http://www.fourcc.org/yuv.php
-// The actual conversion is best described here
-// http://en.wikipedia.org/wiki/YUV
-// An article on optimizing YUV conversion using tables instead of multiplies
-// http://lestourtereaux.free.fr/papers/data/yuvrgb.pdf
-//
- // YV12 is a full plane of Y plus half-height, half-width chroma planes.
- // YV16 is a full plane of Y plus full-height, half-width chroma planes.
-//
-// ARGB pixel format is output, which on little endian is stored as BGRA.
-// The alpha is set to 255, allowing the application to use RGBA or RGB32.
-
-#include "media/base/yuv_convert.h"
-
-#include "base/cpu.h"
-#include "base/logging.h"
-#include "base/memory/scoped_ptr.h"
-#include "build/build_config.h"
-#include "media/base/simd/convert_rgb_to_yuv.h"
-#include "media/base/simd/convert_yuv_to_rgb.h"
-#include "media/base/simd/filter_yuv.h"
-
-#if defined(ARCH_CPU_X86_FAMILY)
-#if defined(COMPILER_MSVC)
-#include <intrin.h>
-#else
-#include <mmintrin.h>
-#endif
-#endif
-
-namespace media {
-
-static FilterYUVRowsProc ChooseFilterYUVRowsProc() {
-#if defined(ARCH_CPU_X86_FAMILY)
- base::CPU cpu;
- if (cpu.has_sse2())
- return &FilterYUVRows_SSE2;
- if (cpu.has_mmx())
- return &FilterYUVRows_MMX;
-#endif
- return &FilterYUVRows_C;
-}
-
-static ConvertYUVToRGB32RowProc ChooseConvertYUVToRGB32RowProc() {
-#if defined(ARCH_CPU_X86_FAMILY)
- base::CPU cpu;
- if (cpu.has_sse())
- return &ConvertYUVToRGB32Row_SSE;
- if (cpu.has_mmx())
- return &ConvertYUVToRGB32Row_MMX;
-#endif
- return &ConvertYUVToRGB32Row_C;
-}
-
-static ScaleYUVToRGB32RowProc ChooseScaleYUVToRGB32RowProc() {
-#if defined(ARCH_CPU_X86_64)
- // Use the 64-bit version if possible.
- return &ScaleYUVToRGB32Row_SSE2_X64;
-#elif defined(ARCH_CPU_X86_FAMILY)
- base::CPU cpu;
- // Choose the best one on 32-bit systems.
- if (cpu.has_sse())
- return &ScaleYUVToRGB32Row_SSE;
- if (cpu.has_mmx())
- return &ScaleYUVToRGB32Row_MMX;
-#endif // defined(ARCH_CPU_X86_64)
- return &ScaleYUVToRGB32Row_C;
-}
-
-static ScaleYUVToRGB32RowProc ChooseLinearScaleYUVToRGB32RowProc() {
-#if defined(ARCH_CPU_X86_64)
- // Use the 64-bit version if possible.
- return &LinearScaleYUVToRGB32Row_MMX_X64;
-#elif defined(ARCH_CPU_X86_FAMILY)
- base::CPU cpu;
- // 32-bit systems.
- if (cpu.has_sse())
- return &LinearScaleYUVToRGB32Row_SSE;
- if (cpu.has_mmx())
- return &LinearScaleYUVToRGB32Row_MMX;
-#endif // defined(ARCH_CPU_X86_64)
- return &LinearScaleYUVToRGB32Row_C;
-}
-
-// Empty SIMD registers state after using them.
-void EmptyRegisterState() {
-#if defined(ARCH_CPU_X86_FAMILY)
- static bool checked = false;
- static bool has_mmx = false;
- if (!checked) {
- base::CPU cpu;
- has_mmx = cpu.has_mmx();
- checked = true;
- }
- if (has_mmx)
- _mm_empty();
-#endif
-}
-
-// 16.16 fixed point arithmetic
-const int kFractionBits = 16;
-const int kFractionMax = 1 << kFractionBits;
-const int kFractionMask = ((1 << kFractionBits) - 1);
-
-// Scale a frame of YUV to 32 bit ARGB.
-void ScaleYUVToRGB32(const uint8* y_buf,
- const uint8* u_buf,
- const uint8* v_buf,
- uint8* rgb_buf,
- int source_width,
- int source_height,
- int width,
- int height,
- int y_pitch,
- int uv_pitch,
- int rgb_pitch,
- YUVType yuv_type,
- Rotate view_rotate,
- ScaleFilter filter) {
- static FilterYUVRowsProc filter_proc = NULL;
- static ConvertYUVToRGB32RowProc convert_proc = NULL;
- static ScaleYUVToRGB32RowProc scale_proc = NULL;
- static ScaleYUVToRGB32RowProc linear_scale_proc = NULL;
-
- if (!filter_proc)
- filter_proc = ChooseFilterYUVRowsProc();
- if (!convert_proc)
- convert_proc = ChooseConvertYUVToRGB32RowProc();
- if (!scale_proc)
- scale_proc = ChooseScaleYUVToRGB32RowProc();
- if (!linear_scale_proc)
- linear_scale_proc = ChooseLinearScaleYUVToRGB32RowProc();
-
- // Handle zero sized sources and destinations.
- if ((yuv_type == YV12 && (source_width < 2 || source_height < 2)) ||
- (yuv_type == YV16 && (source_width < 2 || source_height < 1)) ||
- width == 0 || height == 0)
- return;
-
- // 4096 allows 3 buffers to fit in 12k.
- // Helps performance on CPUs with a 16K L1 cache.
- // Large enough for 3840x2160 sources and 30" displays, which are 2560x1600.
- const int kFilterBufferSize = 4096;
- // Disable filtering if the screen is too big (to avoid buffer overflows).
- // This should never happen to regular users: they don't have monitors
- // wider than 4096 pixels.
- // TODO(fbarchard): Allow rotated videos to filter.
- if (source_width > kFilterBufferSize || view_rotate)
- filter = FILTER_NONE;
-
- unsigned int y_shift = yuv_type;
- // Diagram showing origin and direction of source sampling.
- // ->0 4<-
- // 7 3
- //
- // 6 5
- // ->1 2<-
- // Rotations that start at right side of image.
- if ((view_rotate == ROTATE_180) ||
- (view_rotate == ROTATE_270) ||
- (view_rotate == MIRROR_ROTATE_0) ||
- (view_rotate == MIRROR_ROTATE_90)) {
- y_buf += source_width - 1;
- u_buf += source_width / 2 - 1;
- v_buf += source_width / 2 - 1;
- source_width = -source_width;
- }
- // Rotations that start at bottom of image.
- if ((view_rotate == ROTATE_90) ||
- (view_rotate == ROTATE_180) ||
- (view_rotate == MIRROR_ROTATE_90) ||
- (view_rotate == MIRROR_ROTATE_180)) {
- y_buf += (source_height - 1) * y_pitch;
- u_buf += ((source_height >> y_shift) - 1) * uv_pitch;
- v_buf += ((source_height >> y_shift) - 1) * uv_pitch;
- source_height = -source_height;
- }
-
- int source_dx = source_width * kFractionMax / width;
-
- if ((view_rotate == ROTATE_90) ||
- (view_rotate == ROTATE_270)) {
- int tmp = height;
- height = width;
- width = tmp;
- tmp = source_height;
- source_height = source_width;
- source_width = tmp;
- int source_dy = source_height * kFractionMax / height;
- source_dx = ((source_dy >> kFractionBits) * y_pitch) << kFractionBits;
- if (view_rotate == ROTATE_90) {
- y_pitch = -1;
- uv_pitch = -1;
- source_height = -source_height;
- } else {
- y_pitch = 1;
- uv_pitch = 1;
- }
- }
-
- // Need padding because FilterRows() will write 1 to 16 extra pixels
- // after the end for SSE2 version.
- uint8 yuvbuf[16 + kFilterBufferSize * 3 + 16];
- uint8* ybuf =
- reinterpret_cast<uint8*>(reinterpret_cast<uintptr_t>(yuvbuf + 15) & ~15);
- uint8* ubuf = ybuf + kFilterBufferSize;
- uint8* vbuf = ubuf + kFilterBufferSize;
-
- // TODO(fbarchard): Fixed point math is off by 1 on negatives.
-
- // We take a y-coordinate in [0,1] space in the source image space, and
- // transform to a y-coordinate in [0,1] space in the destination image space.
- // Note that the coordinate endpoints lie on pixel boundaries, not on pixel
- // centers: e.g. a two-pixel-high image will have pixel centers at 0.25 and
- // 0.75. The formula is as follows (in fixed-point arithmetic):
- // y_dst = dst_height * ((y_src + 0.5) / src_height)
- // dst_pixel = clamp([0, dst_height - 1], floor(y_dst - 0.5))
- // Implement this here as an accumulator + delta, to avoid expensive math
- // in the loop.
- int source_y_subpixel_accum =
- ((kFractionMax / 2) * source_height) / height - (kFractionMax / 2);
- int source_y_subpixel_delta = ((1 << kFractionBits) * source_height) / height;
-
- // TODO(fbarchard): Split this into separate function for better efficiency.
- for (int y = 0; y < height; ++y) {
- uint8* dest_pixel = rgb_buf + y * rgb_pitch;
- int source_y_subpixel = source_y_subpixel_accum;
- source_y_subpixel_accum += source_y_subpixel_delta;
- if (source_y_subpixel < 0)
- source_y_subpixel = 0;
- else if (source_y_subpixel > ((source_height - 1) << kFractionBits))
- source_y_subpixel = (source_height - 1) << kFractionBits;
-
- const uint8* y_ptr = NULL;
- const uint8* u_ptr = NULL;
- const uint8* v_ptr = NULL;
- // Apply vertical filtering if necessary.
- // TODO(fbarchard): Remove memcpy when not necessary.
- if (filter & media::FILTER_BILINEAR_V) {
- int source_y = source_y_subpixel >> kFractionBits;
- y_ptr = y_buf + source_y * y_pitch;
- u_ptr = u_buf + (source_y >> y_shift) * uv_pitch;
- v_ptr = v_buf + (source_y >> y_shift) * uv_pitch;
-
- // Vertical scaler uses 16.8 fixed point.
- int source_y_fraction =
- (source_y_subpixel & kFractionMask) >> 8;
- if (source_y_fraction != 0) {
- filter_proc(ybuf, y_ptr, y_ptr + y_pitch, source_width,
- source_y_fraction);
- } else {
- memcpy(ybuf, y_ptr, source_width);
- }
- y_ptr = ybuf;
- ybuf[source_width] = ybuf[source_width-1];
-
- int uv_source_width = (source_width + 1) / 2;
- int source_uv_fraction;
-
- // For formats with half-height UV planes, each even-numbered pixel row
- // should not interpolate, since the next row to interpolate from should
- // be a duplicate of the current row.
- if (y_shift && (source_y & 0x1) == 0)
- source_uv_fraction = 0;
- else
- source_uv_fraction = source_y_fraction;
-
- if (source_uv_fraction != 0) {
- filter_proc(ubuf, u_ptr, u_ptr + uv_pitch, uv_source_width,
- source_uv_fraction);
- filter_proc(vbuf, v_ptr, v_ptr + uv_pitch, uv_source_width,
- source_uv_fraction);
- } else {
- memcpy(ubuf, u_ptr, uv_source_width);
- memcpy(vbuf, v_ptr, uv_source_width);
- }
- u_ptr = ubuf;
- v_ptr = vbuf;
- ubuf[uv_source_width] = ubuf[uv_source_width - 1];
- vbuf[uv_source_width] = vbuf[uv_source_width - 1];
- } else {
- // Offset by 1/2 pixel for center sampling.
- int source_y = (source_y_subpixel + (kFractionMax / 2)) >> kFractionBits;
- y_ptr = y_buf + source_y * y_pitch;
- u_ptr = u_buf + (source_y >> y_shift) * uv_pitch;
- v_ptr = v_buf + (source_y >> y_shift) * uv_pitch;
- }
- if (source_dx == kFractionMax) { // Not scaled
- convert_proc(y_ptr, u_ptr, v_ptr, dest_pixel, width);
- } else {
- if (filter & FILTER_BILINEAR_H) {
- linear_scale_proc(y_ptr, u_ptr, v_ptr, dest_pixel, width, source_dx);
- } else {
- scale_proc(y_ptr, u_ptr, v_ptr, dest_pixel, width, source_dx);
- }
- }
- }
-
- EmptyRegisterState();
-}
-
-// Scale a frame of YV12 to 32 bit ARGB for a specific rectangle.
-void ScaleYUVToRGB32WithRect(const uint8* y_buf,
- const uint8* u_buf,
- const uint8* v_buf,
- uint8* rgb_buf,
- int source_width,
- int source_height,
- int dest_width,
- int dest_height,
- int dest_rect_left,
- int dest_rect_top,
- int dest_rect_right,
- int dest_rect_bottom,
- int y_pitch,
- int uv_pitch,
- int rgb_pitch) {
- static FilterYUVRowsProc filter_proc = NULL;
- if (!filter_proc)
- filter_proc = ChooseFilterYUVRowsProc();
-
- // This routine doesn't currently support up-scaling.
- CHECK_LE(dest_width, source_width);
- CHECK_LE(dest_height, source_height);
-
- // Sanity-check the destination rectangle.
- DCHECK(dest_rect_left >= 0 && dest_rect_right <= dest_width);
- DCHECK(dest_rect_top >= 0 && dest_rect_bottom <= dest_height);
- DCHECK(dest_rect_right > dest_rect_left);
- DCHECK(dest_rect_bottom > dest_rect_top);
-
- // Fixed-point value of vertical and horizontal scale down factor.
- // Values are in the format 16.16.
- int y_step = kFractionMax * source_height / dest_height;
- int x_step = kFractionMax * source_width / dest_width;
-
- // Determine the coordinates of the rectangle in 16.16 coords.
- // NB: Our origin is the *center* of the top/left pixel, NOT its top/left.
- // If we're down-scaling by more than a factor of two, we start with a 50%
- // fraction to avoid degenerating to point-sampling - we should really just
- // fix the fraction at 50% for all pixels in that case.
- int source_left = dest_rect_left * x_step;
- int source_right = (dest_rect_right - 1) * x_step;
- if (x_step < kFractionMax * 2) {
- source_left += ((x_step - kFractionMax) / 2);
- source_right += ((x_step - kFractionMax) / 2);
- } else {
- source_left += kFractionMax / 2;
- source_right += kFractionMax / 2;
- }
- int source_top = dest_rect_top * y_step;
- if (y_step < kFractionMax * 2) {
- source_top += ((y_step - kFractionMax) / 2);
- } else {
- source_top += kFractionMax / 2;
- }
-
- // Determine the parts of the Y, U and V buffers to interpolate.
- int source_y_left = source_left >> kFractionBits;
- int source_y_right = std::min(
- (source_right >> kFractionBits) + 2,
- source_width + 1);
-
- int source_uv_left = source_y_left / 2;
- int source_uv_right = std::min(
- (source_right >> (kFractionBits + 1)) + 2,
- (source_width + 1) / 2);
-
- int source_y_width = source_y_right - source_y_left;
- int source_uv_width = source_uv_right - source_uv_left;
-
- // Determine number of pixels in each output row.
- int dest_rect_width = dest_rect_right - dest_rect_left;
-
- // Intermediate buffer for vertical interpolation.
- // 4096 bytes allows 3 buffers to fit in 12k, which fits in a 16K L1 cache,
- // and is bigger than most users will generally need.
- // The buffer is 16-byte aligned and padded with 16 extra bytes; some of the
- // FilterYUVRowProcs have alignment requirements, and the SSE version can
- // write up to 16 bytes past the end of the buffer.
- const int kFilterBufferSize = 4096;
- if (source_width > kFilterBufferSize)
- filter_proc = NULL;
- uint8 yuv_temp[16 + kFilterBufferSize * 3 + 16];
- uint8* y_temp =
- reinterpret_cast<uint8*>(
- reinterpret_cast<uintptr_t>(yuv_temp + 15) & ~15);
- uint8* u_temp = y_temp + kFilterBufferSize;
- uint8* v_temp = u_temp + kFilterBufferSize;
-
- // Move to the top-left pixel of output.
- rgb_buf += dest_rect_top * rgb_pitch;
- rgb_buf += dest_rect_left * 4;
-
- // For each destination row perform interpolation and color space
- // conversion to produce the output.
- for (int row = dest_rect_top; row < dest_rect_bottom; ++row) {
- // Round the fixed-point y position to get the current row.
- int source_row = source_top >> kFractionBits;
- int source_uv_row = source_row / 2;
- DCHECK(source_row < source_height);
-
- // Locate the first row for each plane for interpolation.
- const uint8* y0_ptr = y_buf + y_pitch * source_row + source_y_left;
- const uint8* u0_ptr = u_buf + uv_pitch * source_uv_row + source_uv_left;
- const uint8* v0_ptr = v_buf + uv_pitch * source_uv_row + source_uv_left;
- const uint8* y1_ptr = NULL;
- const uint8* u1_ptr = NULL;
- const uint8* v1_ptr = NULL;
-
- // Locate the second row for interpolation, being careful not to overrun.
- if (source_row + 1 >= source_height) {
- y1_ptr = y0_ptr;
- } else {
- y1_ptr = y0_ptr + y_pitch;
- }
- if (source_uv_row + 1 >= (source_height + 1) / 2) {
- u1_ptr = u0_ptr;
- v1_ptr = v0_ptr;
- } else {
- u1_ptr = u0_ptr + uv_pitch;
- v1_ptr = v0_ptr + uv_pitch;
- }
-
- if (filter_proc) {
- // Vertical scaler uses 16.8 fixed point.
- int fraction = (source_top & kFractionMask) >> 8;
- filter_proc(y_temp + source_y_left, y0_ptr, y1_ptr,
- source_y_width, fraction);
- filter_proc(u_temp + source_uv_left, u0_ptr, u1_ptr,
- source_uv_width, fraction);
- filter_proc(v_temp + source_uv_left, v0_ptr, v1_ptr,
- source_uv_width, fraction);
-
- // Perform horizontal interpolation and color space conversion.
- // TODO(hclam): Use the MMX version after more testing.
- LinearScaleYUVToRGB32RowWithRange_C(
- y_temp, u_temp, v_temp, rgb_buf,
- dest_rect_width, source_left, x_step);
- } else {
- // If the frame is too large then we linear scale a single row.
- LinearScaleYUVToRGB32RowWithRange_C(
- y0_ptr, u0_ptr, v0_ptr, rgb_buf,
- dest_rect_width, source_left, x_step);
- }
-
- // Advance vertically in the source and destination image.
- source_top += y_step;
- rgb_buf += rgb_pitch;
- }
-
- EmptyRegisterState();
-}
-
-void ConvertRGB32ToYUV(const uint8* rgbframe,
- uint8* yplane,
- uint8* uplane,
- uint8* vplane,
- int width,
- int height,
- int rgbstride,
- int ystride,
- int uvstride) {
- static void (*convert_proc)(const uint8*, uint8*, uint8*, uint8*,
- int, int, int, int, int) = NULL;
- if (!convert_proc) {
-#if defined(ARCH_CPU_ARM_FAMILY) || defined(ARCH_CPU_MIPS_FAMILY)
- // For ARM and MIPS processors, always use C version.
- // TODO(hclam): Implement a NEON version.
- convert_proc = &ConvertRGB32ToYUV_C;
-#elif defined(__LB_PS3__)
- // For these, always use C version.
- convert_proc = &ConvertRGB32ToYUV_C;
-#else
- // TODO(hclam): Switch to SSSE3 version when the cyan problem is solved.
- // See: crbug.com/100462
- base::CPU cpu;
- if (cpu.has_sse2())
- convert_proc = &ConvertRGB32ToYUV_SSE2;
- else
- convert_proc = &ConvertRGB32ToYUV_C;
-#endif
- }
-
- convert_proc(rgbframe, yplane, uplane, vplane, width, height,
- rgbstride, ystride, uvstride);
-}
-
-void ConvertRGB24ToYUV(const uint8* rgbframe,
- uint8* yplane,
- uint8* uplane,
- uint8* vplane,
- int width,
- int height,
- int rgbstride,
- int ystride,
- int uvstride) {
-#if defined(ARCH_CPU_ARM_FAMILY) || defined(ARCH_CPU_MIPS_FAMILY)
- ConvertRGB24ToYUV_C(rgbframe, yplane, uplane, vplane, width, height,
- rgbstride, ystride, uvstride);
-#else
- static void (*convert_proc)(const uint8*, uint8*, uint8*, uint8*,
- int, int, int, int, int) = NULL;
- if (!convert_proc) {
- base::CPU cpu;
- if (cpu.has_ssse3())
- convert_proc = &ConvertRGB24ToYUV_SSSE3;
- else
- convert_proc = &ConvertRGB24ToYUV_C;
- }
- convert_proc(rgbframe, yplane, uplane, vplane, width, height,
- rgbstride, ystride, uvstride);
-#endif
-}
-
-void ConvertYUY2ToYUV(const uint8* src,
- uint8* yplane,
- uint8* uplane,
- uint8* vplane,
- int width,
- int height) {
- for (int i = 0; i < height / 2; ++i) {
- for (int j = 0; j < (width / 2); ++j) {
- yplane[0] = src[0];
- *uplane = src[1];
- yplane[1] = src[2];
- *vplane = src[3];
- src += 4;
- yplane += 2;
- uplane++;
- vplane++;
- }
- for (int j = 0; j < (width / 2); ++j) {
- yplane[0] = src[0];
- yplane[1] = src[2];
- src += 4;
- yplane += 2;
- }
- }
-}
-
-void ConvertNV21ToYUV(const uint8* src,
- uint8* yplane,
- uint8* uplane,
- uint8* vplane,
- int width,
- int height) {
- int y_plane_size = width * height;
- memcpy(yplane, src, y_plane_size);
-
- src += y_plane_size;
- int u_plane_size = y_plane_size >> 2;
- for (int i = 0; i < u_plane_size; ++i) {
- *vplane++ = *src++;
- *uplane++ = *src++;
- }
-}
-
-void ConvertYUVToRGB32(const uint8* yplane,
- const uint8* uplane,
- const uint8* vplane,
- uint8* rgbframe,
- int width,
- int height,
- int ystride,
- int uvstride,
- int rgbstride,
- YUVType yuv_type) {
-#if defined(ARCH_CPU_ARM_FAMILY) || defined(ARCH_CPU_MIPS_FAMILY)
- ConvertYUVToRGB32_C(yplane, uplane, vplane, rgbframe,
- width, height, ystride, uvstride, rgbstride, yuv_type);
-#else
- static ConvertYUVToRGB32Proc convert_proc = NULL;
- if (!convert_proc) {
- base::CPU cpu;
- if (cpu.has_sse())
- convert_proc = &ConvertYUVToRGB32_SSE;
- else if (cpu.has_mmx())
- convert_proc = &ConvertYUVToRGB32_MMX;
- else
- convert_proc = &ConvertYUVToRGB32_C;
- }
-
- convert_proc(yplane, uplane, vplane, rgbframe,
- width, height, ystride, uvstride, rgbstride, yuv_type);
-#endif
-}
-
-} // namespace media
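The scalers above step through the source in 16.16 fixed point; a small worked example of the horizontal step using the same kFractionBits/kFractionMax constants (the widths are arbitrary example values):

const int kFractionBits = 16;
const int kFractionMax = 1 << kFractionBits;  // 65536 represents 1.0 in 16.16.

int source_width = 640;
int dest_width = 1024;
// Each destination pixel advances 640/1024 = 0.625 source pixels:
// 640 * 65536 / 1024 = 40960, i.e. 0.625 in 16.16 fixed point.
int source_dx = source_width * kFractionMax / dest_width;  // 40960

// Destination pixel x samples source column (x * source_dx) >> kFractionBits,
// so x = 0, 1, 2, 3, 4 reads source columns 0, 0, 1, 1, 2.
int source_x_for_dest_3 = (3 * source_dx) >> kFractionBits;  // == 1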
diff --git a/src/media/base/yuv_convert.h b/src/media/base/yuv_convert.h
deleted file mode 100644
index afd47d7..0000000
--- a/src/media/base/yuv_convert.h
+++ /dev/null
@@ -1,129 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef MEDIA_BASE_YUV_CONVERT_H_
-#define MEDIA_BASE_YUV_CONVERT_H_
-
-#include "base/basictypes.h"
-
-namespace media {
-
-// Type of YUV surface.
-// The value of these enums matter as they are used to shift vertical indices.
-enum YUVType {
- YV16 = 0, // YV16 has half-width, full-height chroma channels.
- YV12 = 1, // YV12 has half-width, half-height chroma channels.
-};
-
-// Mirror means flip the image horizontally, as in looking in a mirror.
-// Rotate happens after mirroring.
-enum Rotate {
- ROTATE_0, // Rotation off.
- ROTATE_90, // Rotate clockwise.
- ROTATE_180, // Rotate upside down.
- ROTATE_270, // Rotate counter clockwise.
- MIRROR_ROTATE_0, // Mirror horizontally.
- MIRROR_ROTATE_90, // Mirror then Rotate clockwise.
- MIRROR_ROTATE_180, // Mirror vertically.
- MIRROR_ROTATE_270, // Transpose.
-};
-
-// Filter affects how scaling looks.
-enum ScaleFilter {
- FILTER_NONE = 0, // No filter (point sampled).
- FILTER_BILINEAR_H = 1, // Bilinear horizontal filter.
- FILTER_BILINEAR_V = 2, // Bilinear vertical filter.
- FILTER_BILINEAR = 3, // Bilinear filter.
-};
-
-// Convert a frame of YUV to 32 bit ARGB.
- // Pass in YV16/YV12 depending on the source format.
-void ConvertYUVToRGB32(const uint8* yplane,
- const uint8* uplane,
- const uint8* vplane,
- uint8* rgbframe,
- int width,
- int height,
- int ystride,
- int uvstride,
- int rgbstride,
- YUVType yuv_type);
-
-// Scale a frame of YUV to 32 bit ARGB.
-// Supports rotation and mirroring.
-void ScaleYUVToRGB32(const uint8* yplane,
- const uint8* uplane,
- const uint8* vplane,
- uint8* rgbframe,
- int source_width,
- int source_height,
- int width,
- int height,
- int ystride,
- int uvstride,
- int rgbstride,
- YUVType yuv_type,
- Rotate view_rotate,
- ScaleFilter filter);
-
- // Bilinearly scale a frame of YV12 to 32 bit ARGB on a specified rectangle.
- // |yplane|, etc. and |rgbframe| should point to the top-left pixels of the
-// source and destination buffers.
-void ScaleYUVToRGB32WithRect(const uint8* yplane,
- const uint8* uplane,
- const uint8* vplane,
- uint8* rgbframe,
- int source_width,
- int source_height,
- int dest_width,
- int dest_height,
- int dest_rect_left,
- int dest_rect_top,
- int dest_rect_right,
- int dest_rect_bottom,
- int ystride,
- int uvstride,
- int rgbstride);
-
-void ConvertRGB32ToYUV(const uint8* rgbframe,
- uint8* yplane,
- uint8* uplane,
- uint8* vplane,
- int width,
- int height,
- int rgbstride,
- int ystride,
- int uvstride);
-
-void ConvertRGB24ToYUV(const uint8* rgbframe,
- uint8* yplane,
- uint8* uplane,
- uint8* vplane,
- int width,
- int height,
- int rgbstride,
- int ystride,
- int uvstride);
-
-void ConvertYUY2ToYUV(const uint8* src,
- uint8* yplane,
- uint8* uplane,
- uint8* vplane,
- int width,
- int height);
-
-void ConvertNV21ToYUV(const uint8* src,
- uint8* yplane,
- uint8* uplane,
- uint8* vplane,
- int width,
- int height);
-
-// Empty SIMD register state after calling optimized scaler functions.
- // This method is only used in unit tests after calling SIMD functions.
-void EmptyRegisterState();
-
-} // namespace media
-
-#endif // MEDIA_BASE_YUV_CONVERT_H_
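A usage sketch for ConvertYUVToRGB32() with a contiguous YV12 buffer: the Y plane is width*height bytes and each chroma plane is a quarter of that, which is where the 12 bpp figure and the w*h / w*h*5/4 plane offsets used by the unit tests below come from; the dimensions are arbitrary example values.

int width = 640, height = 360;
int y_size = width * height;   // 230400 bytes of Y.
int uv_size = y_size / 4;      // 57600 bytes each for U and V.
scoped_array<uint8> yv12(new uint8[y_size + 2 * uv_size]);  // 12 bpp total.
scoped_array<uint8> argb(new uint8[width * height * 4]);    // 32 bpp output.

media::ConvertYUVToRGB32(yv12.get(),                   // Y at offset 0.
                         yv12.get() + y_size,          // U at w*h.
                         yv12.get() + y_size * 5 / 4,  // V at w*h*5/4.
                         argb.get(),
                         width, height,
                         width,       // Y stride.
                         width / 2,   // U/V stride.
                         width * 4,   // RGB stride.
                         media::YV12);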
diff --git a/src/media/base/yuv_convert_unittest.cc b/src/media/base/yuv_convert_unittest.cc
deleted file mode 100644
index c57f715..0000000
--- a/src/media/base/yuv_convert_unittest.cc
+++ /dev/null
@@ -1,937 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/base_paths.h"
-#include "base/cpu.h"
-#include "base/file_util.h"
-#include "base/logging.h"
-#include "base/path_service.h"
-#include "media/base/djb2.h"
-#include "media/base/simd/convert_rgb_to_yuv.h"
-#include "media/base/simd/convert_yuv_to_rgb.h"
-#include "media/base/simd/filter_yuv.h"
-#include "media/base/yuv_convert.h"
-#include "testing/gtest/include/gtest/gtest.h"
-#include "ui/gfx/rect.h"
-
-// Size of raw image.
-static const int kSourceWidth = 640;
-static const int kSourceHeight = 360;
-static const int kSourceYSize = kSourceWidth * kSourceHeight;
-static const int kSourceUOffset = kSourceYSize;
-static const int kSourceVOffset = kSourceYSize * 5 / 4;
-static const int kScaledWidth = 1024;
-static const int kScaledHeight = 768;
-static const int kDownScaledWidth = 512;
-static const int kDownScaledHeight = 320;
-static const int kBpp = 4;
-
-// Surface sizes for various test files.
-static const int kYUV12Size = kSourceYSize * 12 / 8;
-static const int kYUV16Size = kSourceYSize * 16 / 8;
-static const int kYUY2Size = kSourceYSize * 16 / 8;
-static const int kRGBSize = kSourceYSize * kBpp;
-static const int kRGBSizeScaled = kScaledWidth * kScaledHeight * kBpp;
-static const int kRGB24Size = kSourceYSize * 3;
-static const int kRGBSizeConverted = kSourceYSize * kBpp;
-
-// Helper for reading test data into a scoped_array<uint8>.
-static void ReadData(const FilePath::CharType* filename,
- int expected_size,
- scoped_array<uint8>* data) {
- data->reset(new uint8[expected_size]);
-
- FilePath path;
- CHECK(PathService::Get(base::DIR_SOURCE_ROOT, &path));
- path = path.Append(FILE_PATH_LITERAL("media"))
- .Append(FILE_PATH_LITERAL("test"))
- .Append(FILE_PATH_LITERAL("data"))
- .Append(filename);
-
- // Verify file size is correct.
- int64 actual_size = 0;
- file_util::GetFileSize(path, &actual_size);
- CHECK_EQ(actual_size, expected_size);
-
- // Verify bytes read are correct.
- int bytes_read = file_util::ReadFile(
- path, reinterpret_cast<char*>(data->get()), expected_size);
- CHECK_EQ(bytes_read, expected_size);
-}
-
-static void ReadYV12Data(scoped_array<uint8>* data) {
- ReadData(FILE_PATH_LITERAL("bali_640x360_P420.yuv"), kYUV12Size, data);
-}
-
-static void ReadYV16Data(scoped_array<uint8>* data) {
- ReadData(FILE_PATH_LITERAL("bali_640x360_P422.yuv"), kYUV16Size, data);
-}
-
-static void ReadRGB24Data(scoped_array<uint8>* data) {
- ReadData(FILE_PATH_LITERAL("bali_640x360_RGB24.rgb"), kRGB24Size, data);
-}
-
-static void ReadYUY2Data(scoped_array<uint8>* data) {
- ReadData(FILE_PATH_LITERAL("bali_640x360_YUY2.yuv"), kYUY2Size, data);
-}
-
-TEST(YUVConvertTest, YV12) {
- // Allocate all surfaces.
- scoped_array<uint8> yuv_bytes;
- scoped_array<uint8> rgb_bytes(new uint8[kRGBSize]);
- scoped_array<uint8> rgb_converted_bytes(new uint8[kRGBSizeConverted]);
-
- // Read YUV reference data from file.
- ReadYV12Data(&yuv_bytes);
-
- // Convert a frame of YUV to 32 bit ARGB.
- media::ConvertYUVToRGB32(yuv_bytes.get(),
- yuv_bytes.get() + kSourceUOffset,
- yuv_bytes.get() + kSourceVOffset,
- rgb_converted_bytes.get(), // RGB output
- kSourceWidth, kSourceHeight, // Dimensions
- kSourceWidth, // YStride
- kSourceWidth / 2, // UVStride
- kSourceWidth * kBpp, // RGBStride
- media::YV12);
-
- uint32 rgb_hash = DJB2Hash(rgb_converted_bytes.get(), kRGBSizeConverted,
- kDJB2HashSeed);
- EXPECT_EQ(2413171226u, rgb_hash);
-}
-
-TEST(YUVConvertTest, YV16) {
- // Allocate all surfaces.
- scoped_array<uint8> yuv_bytes;
- scoped_array<uint8> rgb_bytes(new uint8[kRGBSize]);
- scoped_array<uint8> rgb_converted_bytes(new uint8[kRGBSizeConverted]);
-
- // Read YUV reference data from file.
- ReadYV16Data(&yuv_bytes);
-
- // Convert a frame of YUV to 32 bit ARGB.
- media::ConvertYUVToRGB32(yuv_bytes.get(), // Y
- yuv_bytes.get() + kSourceUOffset, // U
- yuv_bytes.get() + kSourceYSize * 3 / 2, // V
- rgb_converted_bytes.get(), // RGB output
- kSourceWidth, kSourceHeight, // Dimensions
- kSourceWidth, // YStride
- kSourceWidth / 2, // UVStride
- kSourceWidth * kBpp, // RGBStride
- media::YV16);
-
- uint32 rgb_hash = DJB2Hash(rgb_converted_bytes.get(), kRGBSizeConverted,
- kDJB2HashSeed);
- EXPECT_EQ(4222342047u, rgb_hash);
-}
-
-struct YUVScaleTestData {
- YUVScaleTestData(media::YUVType y, media::ScaleFilter s, uint32 r)
- : yuv_type(y),
- scale_filter(s),
- rgb_hash(r) {
- }
-
- media::YUVType yuv_type;
- media::ScaleFilter scale_filter;
- uint32 rgb_hash;
-};
-
-class YUVScaleTest : public ::testing::TestWithParam<YUVScaleTestData> {
- public:
- YUVScaleTest() {
- switch (GetParam().yuv_type) {
- case media::YV12:
- ReadYV12Data(&yuv_bytes_);
- break;
- case media::YV16:
- ReadYV16Data(&yuv_bytes_);
- break;
- }
-
- rgb_bytes_.reset(new uint8[kRGBSizeScaled]);
- }
-
- // Helpers for getting the proper Y, U and V plane offsets.
- uint8* y_plane() { return yuv_bytes_.get(); }
- uint8* u_plane() { return yuv_bytes_.get() + kSourceYSize; }
- uint8* v_plane() {
- switch (GetParam().yuv_type) {
- case media::YV12:
- return yuv_bytes_.get() + kSourceVOffset;
- case media::YV16:
- return yuv_bytes_.get() + kSourceYSize * 3 / 2;
- }
- return NULL;
- }
-
- scoped_array<uint8> yuv_bytes_;
- scoped_array<uint8> rgb_bytes_;
-};
-
-TEST_P(YUVScaleTest, NoScale) {
- media::ScaleYUVToRGB32(y_plane(), // Y
- u_plane(), // U
- v_plane(), // V
- rgb_bytes_.get(), // RGB output
- kSourceWidth, kSourceHeight, // Dimensions
- kSourceWidth, kSourceHeight, // Dimensions
- kSourceWidth, // YStride
- kSourceWidth / 2, // UvStride
- kSourceWidth * kBpp, // RgbStride
- GetParam().yuv_type,
- media::ROTATE_0,
- GetParam().scale_filter);
-
- uint32 yuv_hash = DJB2Hash(rgb_bytes_.get(), kRGBSize, kDJB2HashSeed);
-
- media::ConvertYUVToRGB32(y_plane(), // Y
- u_plane(), // U
- v_plane(), // V
- rgb_bytes_.get(), // RGB output
- kSourceWidth, kSourceHeight, // Dimensions
- kSourceWidth, // YStride
- kSourceWidth / 2, // UVStride
- kSourceWidth * kBpp, // RGBStride
- GetParam().yuv_type);
-
- uint32 rgb_hash = DJB2Hash(rgb_bytes_.get(), kRGBSize, kDJB2HashSeed);
-
- EXPECT_EQ(yuv_hash, rgb_hash);
-}
-
-TEST_P(YUVScaleTest, Normal) {
- media::ScaleYUVToRGB32(y_plane(), // Y
- u_plane(), // U
- v_plane(), // V
- rgb_bytes_.get(), // RGB output
- kSourceWidth, kSourceHeight, // Dimensions
- kScaledWidth, kScaledHeight, // Dimensions
- kSourceWidth, // YStride
- kSourceWidth / 2, // UvStride
- kScaledWidth * kBpp, // RgbStride
- GetParam().yuv_type,
- media::ROTATE_0,
- GetParam().scale_filter);
-
- uint32 rgb_hash = DJB2Hash(rgb_bytes_.get(), kRGBSizeScaled, kDJB2HashSeed);
- EXPECT_EQ(GetParam().rgb_hash, rgb_hash);
-}
-
-TEST_P(YUVScaleTest, ZeroSourceSize) {
- media::ScaleYUVToRGB32(y_plane(), // Y
- u_plane(), // U
- v_plane(), // V
- rgb_bytes_.get(), // RGB output
- 0, 0, // Dimensions
- kScaledWidth, kScaledHeight, // Dimensions
- kSourceWidth, // YStride
- kSourceWidth / 2, // UvStride
- kScaledWidth * kBpp, // RgbStride
- GetParam().yuv_type,
- media::ROTATE_0,
- GetParam().scale_filter);
-
- // Testing for out-of-bound read/writes with AddressSanitizer.
-}
-
-TEST_P(YUVScaleTest, ZeroDestinationSize) {
- media::ScaleYUVToRGB32(y_plane(), // Y
- u_plane(), // U
- v_plane(), // V
- rgb_bytes_.get(), // RGB output
- kSourceWidth, kSourceHeight, // Dimensions
- 0, 0, // Dimensions
- kSourceWidth, // YStride
- kSourceWidth / 2, // UvStride
- kScaledWidth * kBpp, // RgbStride
- GetParam().yuv_type,
- media::ROTATE_0,
- GetParam().scale_filter);
-
- // Testing for out-of-bound read/writes with AddressSanitizer.
-}
-
-TEST_P(YUVScaleTest, OddWidthAndHeightNotCrash) {
- media::ScaleYUVToRGB32(y_plane(), // Y
- u_plane(), // U
- v_plane(), // V
- rgb_bytes_.get(), // RGB output
- kSourceWidth, kSourceHeight, // Dimensions
- 3, 3, // Dimensions
- kSourceWidth, // YStride
- kSourceWidth / 2, // UvStride
- kScaledWidth * kBpp, // RgbStride
- GetParam().yuv_type,
- media::ROTATE_0,
- GetParam().scale_filter);
-}
-
-INSTANTIATE_TEST_CASE_P(
- YUVScaleFormats, YUVScaleTest,
- ::testing::Values(
- YUVScaleTestData(media::YV12, media::FILTER_NONE, 4136904952u),
- YUVScaleTestData(media::YV16, media::FILTER_NONE, 1501777547u),
- YUVScaleTestData(media::YV12, media::FILTER_BILINEAR, 3164274689u),
- YUVScaleTestData(media::YV16, media::FILTER_BILINEAR, 3095878046u)));
-
- // This tests a known worst-case YUV value and checks for overflow.
-TEST(YUVConvertTest, Clamp) {
- // Allocate all surfaces.
- scoped_array<uint8> yuv_bytes(new uint8[1]);
- scoped_array<uint8> rgb_bytes(new uint8[1]);
- scoped_array<uint8> rgb_converted_bytes(new uint8[1]);
-
- // Values that failed previously in a bug report.
- unsigned char y = 255u;
- unsigned char u = 255u;
- unsigned char v = 19u;
-
- // Prefill extra large destination buffer to test for overflow.
- unsigned char rgb[8] = { 0, 1, 2, 3, 4, 5, 6, 7 };
- unsigned char expected[8] = { 255, 255, 104, 255, 4, 5, 6, 7 };
- // Convert a frame of YUV to 32 bit ARGB.
- media::ConvertYUVToRGB32(&y, // Y
- &u, // U
- &v, // V
- &rgb[0], // RGB output
- 1, 1, // Dimensions
- 0, // YStride
- 0, // UVStride
- 0, // RGBStride
- media::YV12);
-
- int expected_test = memcmp(rgb, expected, sizeof(expected));
- EXPECT_EQ(0, expected_test);
-}
-
-TEST(YUVConvertTest, RGB24ToYUV) {
- // Allocate all surfaces.
- scoped_array<uint8> rgb_bytes;
- scoped_array<uint8> yuv_converted_bytes(new uint8[kYUV12Size]);
-
- // Read RGB24 reference data from file.
- ReadRGB24Data(&rgb_bytes);
-
- // Convert to I420.
- media::ConvertRGB24ToYUV(rgb_bytes.get(),
- yuv_converted_bytes.get(),
- yuv_converted_bytes.get() + kSourceUOffset,
- yuv_converted_bytes.get() + kSourceVOffset,
- kSourceWidth, kSourceHeight, // Dimensions
- kSourceWidth * 3, // RGBStride
- kSourceWidth, // YStride
- kSourceWidth / 2); // UVStride
-
- uint32 rgb_hash = DJB2Hash(yuv_converted_bytes.get(), kYUV12Size,
- kDJB2HashSeed);
- EXPECT_EQ(320824432u, rgb_hash);
-}
-
-TEST(YUVConvertTest, RGB32ToYUV) {
- // Allocate all surfaces.
- scoped_array<uint8> yuv_bytes(new uint8[kYUV12Size]);
- scoped_array<uint8> rgb_bytes(new uint8[kRGBSize]);
- scoped_array<uint8> yuv_converted_bytes(new uint8[kYUV12Size]);
- scoped_array<uint8> rgb_converted_bytes(new uint8[kRGBSize]);
-
- // Read YUV reference data from file.
- FilePath yuv_url;
- EXPECT_TRUE(PathService::Get(base::DIR_SOURCE_ROOT, &yuv_url));
- yuv_url = yuv_url.Append(FILE_PATH_LITERAL("media"))
- .Append(FILE_PATH_LITERAL("test"))
- .Append(FILE_PATH_LITERAL("data"))
- .Append(FILE_PATH_LITERAL("bali_640x360_P420.yuv"));
- EXPECT_EQ(static_cast<int>(kYUV12Size),
- file_util::ReadFile(yuv_url,
- reinterpret_cast<char*>(yuv_bytes.get()),
- static_cast<int>(kYUV12Size)));
-
- // Convert a frame of YUV to 32 bit ARGB.
- media::ConvertYUVToRGB32(yuv_bytes.get(),
- yuv_bytes.get() + kSourceUOffset,
- yuv_bytes.get() + kSourceVOffset,
- rgb_bytes.get(), // RGB output
- kSourceWidth, kSourceHeight, // Dimensions
- kSourceWidth, // YStride
- kSourceWidth / 2, // UVStride
- kSourceWidth * kBpp, // RGBStride
- media::YV12);
-
- // Convert RGB32 to YV12.
- media::ConvertRGB32ToYUV(rgb_bytes.get(),
- yuv_converted_bytes.get(),
- yuv_converted_bytes.get() + kSourceUOffset,
- yuv_converted_bytes.get() + kSourceVOffset,
- kSourceWidth, kSourceHeight, // Dimensions
- kSourceWidth * 4, // RGBStride
- kSourceWidth, // YStride
- kSourceWidth / 2); // UVStride
-
- // Convert YV12 back to RGB32.
- media::ConvertYUVToRGB32(yuv_converted_bytes.get(),
- yuv_converted_bytes.get() + kSourceUOffset,
- yuv_converted_bytes.get() + kSourceVOffset,
- rgb_converted_bytes.get(), // RGB output
- kSourceWidth, kSourceHeight, // Dimensions
- kSourceWidth, // YStride
- kSourceWidth / 2, // UVStride
- kSourceWidth * kBpp, // RGBStride
- media::YV12);
-
- int error = 0;
- for (int i = 0; i < kRGBSize; ++i) {
- int diff = rgb_converted_bytes[i] - rgb_bytes[i];
- if (diff < 0)
- diff = -diff;
- error += diff;
- }
-
- // Make sure the error is within bounds.
- DVLOG(1) << "Average error per channel: " << error / kRGBSize;
- EXPECT_GT(5, error / kRGBSize);
-}
-
-TEST(YUVConvertTest, YUY2ToYUV) {
- // Allocate all surfaces.
- scoped_array<uint8> yuy_bytes;
- scoped_array<uint8> yuv_converted_bytes(new uint8[kYUV12Size]);
-
- // Read YUY reference data from file.
- ReadYUY2Data(&yuy_bytes);
-
- // Convert to I420.
- media::ConvertYUY2ToYUV(yuy_bytes.get(),
- yuv_converted_bytes.get(),
- yuv_converted_bytes.get() + kSourceUOffset,
- yuv_converted_bytes.get() + kSourceVOffset,
- kSourceWidth, kSourceHeight);
-
- uint32 yuy_hash = DJB2Hash(yuv_converted_bytes.get(), kYUV12Size,
- kDJB2HashSeed);
- EXPECT_EQ(666823187u, yuy_hash);
-}
-
-TEST(YUVConvertTest, DownScaleYUVToRGB32WithRect) {
- // Read YUV reference data from file.
- FilePath yuv_url;
- EXPECT_TRUE(PathService::Get(base::DIR_SOURCE_ROOT, &yuv_url));
- yuv_url = yuv_url.Append(FILE_PATH_LITERAL("media"))
- .Append(FILE_PATH_LITERAL("test"))
- .Append(FILE_PATH_LITERAL("data"))
- .Append(FILE_PATH_LITERAL("bali_640x360_P420.yuv"));
- const size_t size_of_yuv = kSourceYSize * 12 / 8; // 12 bpp.
- scoped_array<uint8> yuv_bytes(new uint8[size_of_yuv]);
- EXPECT_EQ(static_cast<int>(size_of_yuv),
- file_util::ReadFile(yuv_url,
- reinterpret_cast<char*>(yuv_bytes.get()),
- static_cast<int>(size_of_yuv)));
-
- // Scale the full frame of YUV to 32 bit ARGB.
- // The API currently only supports down-scaling, so we don't test up-scaling.
- const size_t size_of_rgb_scaled = kDownScaledWidth * kDownScaledHeight * kBpp;
- scoped_array<uint8> rgb_scaled_bytes(new uint8[size_of_rgb_scaled]);
- gfx::Rect sub_rect(0, 0, kDownScaledWidth, kDownScaledHeight);
-
- // We can't compare with the full-frame scaler because it uses slightly
- // different sampling coordinates.
- media::ScaleYUVToRGB32WithRect(
- yuv_bytes.get(), // Y
- yuv_bytes.get() + kSourceUOffset, // U
- yuv_bytes.get() + kSourceVOffset, // V
- rgb_scaled_bytes.get(), // Rgb output
- kSourceWidth, kSourceHeight, // Dimensions
- kDownScaledWidth, kDownScaledHeight, // Dimensions
- sub_rect.x(), sub_rect.y(), // Dest rect
- sub_rect.right(), sub_rect.bottom(), // Dest rect
- kSourceWidth, // YStride
- kSourceWidth / 2, // UvStride
- kDownScaledWidth * kBpp); // RgbStride
-
- uint32 rgb_hash_full_rect = DJB2Hash(rgb_scaled_bytes.get(),
- size_of_rgb_scaled,
- kDJB2HashSeed);
-
- // Re-scale sub-rectangles and verify the results are the same.
- int next_sub_rect = 0;
- while (!sub_rect.IsEmpty()) {
- // Scale a partial rectangle.
- media::ScaleYUVToRGB32WithRect(
- yuv_bytes.get(), // Y
- yuv_bytes.get() + kSourceUOffset, // U
- yuv_bytes.get() + kSourceVOffset, // V
- rgb_scaled_bytes.get(), // Rgb output
- kSourceWidth, kSourceHeight, // Dimensions
- kDownScaledWidth, kDownScaledHeight, // Dimensions
- sub_rect.x(), sub_rect.y(), // Dest rect
- sub_rect.right(), sub_rect.bottom(), // Dest rect
- kSourceWidth, // YStride
- kSourceWidth / 2, // UvStride
- kDownScaledWidth * kBpp); // RgbStride
- uint32 rgb_hash_sub_rect = DJB2Hash(rgb_scaled_bytes.get(),
- size_of_rgb_scaled,
- kDJB2HashSeed);
-
- EXPECT_EQ(rgb_hash_full_rect, rgb_hash_sub_rect);
-
- // Now choose a quarter rect of this sub-rect.
- if (next_sub_rect & 1)
- sub_rect.set_x(sub_rect.x() + sub_rect.width() / 2);
- if (next_sub_rect & 2)
- sub_rect.set_y(sub_rect.y() + sub_rect.height() / 2);
- sub_rect.set_width(sub_rect.width() / 2);
- sub_rect.set_height(sub_rect.height() / 2);
- next_sub_rect++;
- }
-}
-
-#if !defined(ARCH_CPU_ARM_FAMILY)
-TEST(YUVConvertTest, RGB32ToYUV_SSE2_MatchReference) {
- base::CPU cpu;
- if (!cpu.has_sse2()) {
- LOG(WARNING) << "System doesn't support SSE2, test not executed.";
- return;
- }
-
- // Allocate all surfaces.
- scoped_array<uint8> yuv_bytes(new uint8[kYUV12Size]);
- scoped_array<uint8> rgb_bytes(new uint8[kRGBSize]);
- scoped_array<uint8> yuv_converted_bytes(new uint8[kYUV12Size]);
- scoped_array<uint8> yuv_reference_bytes(new uint8[kYUV12Size]);
-
- ReadYV12Data(&yuv_bytes);
-
- // Convert a frame of YUV to 32 bit ARGB.
- media::ConvertYUVToRGB32(
- yuv_bytes.get(),
- yuv_bytes.get() + kSourceUOffset,
- yuv_bytes.get() + kSourceVOffset,
- rgb_bytes.get(), // RGB output
- kSourceWidth, kSourceHeight, // Dimensions
- kSourceWidth, // YStride
- kSourceWidth / 2, // UVStride
- kSourceWidth * kBpp, // RGBStride
- media::YV12);
-
- // Convert RGB32 to YV12 with SSE2 version.
- media::ConvertRGB32ToYUV_SSE2(
- rgb_bytes.get(),
- yuv_converted_bytes.get(),
- yuv_converted_bytes.get() + kSourceUOffset,
- yuv_converted_bytes.get() + kSourceVOffset,
- kSourceWidth, kSourceHeight, // Dimensions
- kSourceWidth * 4, // RGBStride
- kSourceWidth, // YStride
- kSourceWidth / 2); // UVStride
-
- // Convert RGB32 to YV12 with reference version.
- media::ConvertRGB32ToYUV_SSE2_Reference(
- rgb_bytes.get(),
- yuv_reference_bytes.get(),
- yuv_reference_bytes.get() + kSourceUOffset,
- yuv_reference_bytes.get() + kSourceVOffset,
- kSourceWidth, kSourceHeight, // Dimensions
- kSourceWidth * 4, // RGBStride
- kSourceWidth, // YStride
- kSourceWidth / 2); // UVStride
-
-  // Now convert an odd width and height. This overwrites part of the buffer
-  // generated above, but that is fine because the point of this test is to
-  // match the result with the reference code.
-
- // Convert RGB32 to YV12 with SSE2 version.
- media::ConvertRGB32ToYUV_SSE2(
- rgb_bytes.get(),
- yuv_converted_bytes.get(),
- yuv_converted_bytes.get() + kSourceUOffset,
- yuv_converted_bytes.get() + kSourceVOffset,
- 7, 7, // Dimensions
- kSourceWidth * 4, // RGBStride
- kSourceWidth, // YStride
- kSourceWidth / 2); // UVStride
-
- // Convert RGB32 to YV12 with reference version.
- media::ConvertRGB32ToYUV_SSE2_Reference(
- rgb_bytes.get(),
- yuv_reference_bytes.get(),
- yuv_reference_bytes.get() + kSourceUOffset,
- yuv_reference_bytes.get() + kSourceVOffset,
- 7, 7, // Dimensions
- kSourceWidth * 4, // RGBStride
- kSourceWidth, // YStride
- kSourceWidth / 2); // UVStride
-
- int error = 0;
- for (int i = 0; i < kYUV12Size; ++i) {
- int diff = yuv_reference_bytes[i] - yuv_converted_bytes[i];
- if (diff < 0)
- diff = -diff;
- error += diff;
- }
-
- // Make sure there's no difference from the reference.
- EXPECT_EQ(0, error);
-}
-
-TEST(YUVConvertTest, ConvertYUVToRGB32Row_MMX) {
- base::CPU cpu;
- if (!cpu.has_mmx()) {
- LOG(WARNING) << "System not supported. Test skipped.";
- return;
- }
-
- scoped_array<uint8> yuv_bytes(new uint8[kYUV12Size]);
- scoped_array<uint8> rgb_bytes_reference(new uint8[kRGBSize]);
- scoped_array<uint8> rgb_bytes_converted(new uint8[kRGBSize]);
- ReadYV12Data(&yuv_bytes);
-
- const int kWidth = 167;
- ConvertYUVToRGB32Row_C(yuv_bytes.get(),
- yuv_bytes.get() + kSourceUOffset,
- yuv_bytes.get() + kSourceVOffset,
- rgb_bytes_reference.get(),
- kWidth);
- ConvertYUVToRGB32Row_MMX(yuv_bytes.get(),
- yuv_bytes.get() + kSourceUOffset,
- yuv_bytes.get() + kSourceVOffset,
- rgb_bytes_converted.get(),
- kWidth);
- media::EmptyRegisterState();
- EXPECT_EQ(0, memcmp(rgb_bytes_reference.get(),
- rgb_bytes_converted.get(),
- kWidth * kBpp));
-}
-
-TEST(YUVConvertTest, ConvertYUVToRGB32Row_SSE) {
- base::CPU cpu;
- if (!cpu.has_sse()) {
- LOG(WARNING) << "System not supported. Test skipped.";
- return;
- }
-
- scoped_array<uint8> yuv_bytes(new uint8[kYUV12Size]);
- scoped_array<uint8> rgb_bytes_reference(new uint8[kRGBSize]);
- scoped_array<uint8> rgb_bytes_converted(new uint8[kRGBSize]);
- ReadYV12Data(&yuv_bytes);
-
- const int kWidth = 167;
- ConvertYUVToRGB32Row_C(yuv_bytes.get(),
- yuv_bytes.get() + kSourceUOffset,
- yuv_bytes.get() + kSourceVOffset,
- rgb_bytes_reference.get(),
- kWidth);
- ConvertYUVToRGB32Row_SSE(yuv_bytes.get(),
- yuv_bytes.get() + kSourceUOffset,
- yuv_bytes.get() + kSourceVOffset,
- rgb_bytes_converted.get(),
- kWidth);
- media::EmptyRegisterState();
- EXPECT_EQ(0, memcmp(rgb_bytes_reference.get(),
- rgb_bytes_converted.get(),
- kWidth * kBpp));
-}
-
-TEST(YUVConvertTest, ScaleYUVToRGB32Row_MMX) {
- base::CPU cpu;
- if (!cpu.has_mmx()) {
- LOG(WARNING) << "System not supported. Test skipped.";
- return;
- }
-
- scoped_array<uint8> yuv_bytes(new uint8[kYUV12Size]);
- scoped_array<uint8> rgb_bytes_reference(new uint8[kRGBSize]);
- scoped_array<uint8> rgb_bytes_converted(new uint8[kRGBSize]);
- ReadYV12Data(&yuv_bytes);
-
- const int kWidth = 167;
- const int kSourceDx = 80000; // This value means a scale down.
- ScaleYUVToRGB32Row_C(yuv_bytes.get(),
- yuv_bytes.get() + kSourceUOffset,
- yuv_bytes.get() + kSourceVOffset,
- rgb_bytes_reference.get(),
- kWidth,
- kSourceDx);
- ScaleYUVToRGB32Row_MMX(yuv_bytes.get(),
- yuv_bytes.get() + kSourceUOffset,
- yuv_bytes.get() + kSourceVOffset,
- rgb_bytes_converted.get(),
- kWidth,
- kSourceDx);
- media::EmptyRegisterState();
- EXPECT_EQ(0, memcmp(rgb_bytes_reference.get(),
- rgb_bytes_converted.get(),
- kWidth * kBpp));
-}
-
-TEST(YUVConvertTest, ScaleYUVToRGB32Row_SSE) {
- base::CPU cpu;
- if (!cpu.has_sse()) {
- LOG(WARNING) << "System not supported. Test skipped.";
- return;
- }
-
- scoped_array<uint8> yuv_bytes(new uint8[kYUV12Size]);
- scoped_array<uint8> rgb_bytes_reference(new uint8[kRGBSize]);
- scoped_array<uint8> rgb_bytes_converted(new uint8[kRGBSize]);
- ReadYV12Data(&yuv_bytes);
-
- const int kWidth = 167;
- const int kSourceDx = 80000; // This value means a scale down.
- ScaleYUVToRGB32Row_C(yuv_bytes.get(),
- yuv_bytes.get() + kSourceUOffset,
- yuv_bytes.get() + kSourceVOffset,
- rgb_bytes_reference.get(),
- kWidth,
- kSourceDx);
- ScaleYUVToRGB32Row_SSE(yuv_bytes.get(),
- yuv_bytes.get() + kSourceUOffset,
- yuv_bytes.get() + kSourceVOffset,
- rgb_bytes_converted.get(),
- kWidth,
- kSourceDx);
- media::EmptyRegisterState();
- EXPECT_EQ(0, memcmp(rgb_bytes_reference.get(),
- rgb_bytes_converted.get(),
- kWidth * kBpp));
-}
-
-TEST(YUVConvertTest, LinearScaleYUVToRGB32Row_MMX) {
- base::CPU cpu;
- if (!cpu.has_mmx()) {
- LOG(WARNING) << "System not supported. Test skipped.";
- return;
- }
-
- scoped_array<uint8> yuv_bytes(new uint8[kYUV12Size]);
- scoped_array<uint8> rgb_bytes_reference(new uint8[kRGBSize]);
- scoped_array<uint8> rgb_bytes_converted(new uint8[kRGBSize]);
- ReadYV12Data(&yuv_bytes);
-
- const int kWidth = 167;
- const int kSourceDx = 80000; // This value means a scale down.
- LinearScaleYUVToRGB32Row_C(yuv_bytes.get(),
- yuv_bytes.get() + kSourceUOffset,
- yuv_bytes.get() + kSourceVOffset,
- rgb_bytes_reference.get(),
- kWidth,
- kSourceDx);
- LinearScaleYUVToRGB32Row_MMX(yuv_bytes.get(),
- yuv_bytes.get() + kSourceUOffset,
- yuv_bytes.get() + kSourceVOffset,
- rgb_bytes_converted.get(),
- kWidth,
- kSourceDx);
- media::EmptyRegisterState();
- EXPECT_EQ(0, memcmp(rgb_bytes_reference.get(),
- rgb_bytes_converted.get(),
- kWidth * kBpp));
-}
-
-TEST(YUVConvertTest, LinearScaleYUVToRGB32Row_SSE) {
- base::CPU cpu;
- if (!cpu.has_sse()) {
- LOG(WARNING) << "System not supported. Test skipped.";
- return;
- }
-
- scoped_array<uint8> yuv_bytes(new uint8[kYUV12Size]);
- scoped_array<uint8> rgb_bytes_reference(new uint8[kRGBSize]);
- scoped_array<uint8> rgb_bytes_converted(new uint8[kRGBSize]);
- ReadYV12Data(&yuv_bytes);
-
- const int kWidth = 167;
- const int kSourceDx = 80000; // This value means a scale down.
- LinearScaleYUVToRGB32Row_C(yuv_bytes.get(),
- yuv_bytes.get() + kSourceUOffset,
- yuv_bytes.get() + kSourceVOffset,
- rgb_bytes_reference.get(),
- kWidth,
- kSourceDx);
- LinearScaleYUVToRGB32Row_SSE(yuv_bytes.get(),
- yuv_bytes.get() + kSourceUOffset,
- yuv_bytes.get() + kSourceVOffset,
- rgb_bytes_converted.get(),
- kWidth,
- kSourceDx);
- media::EmptyRegisterState();
- EXPECT_EQ(0, memcmp(rgb_bytes_reference.get(),
- rgb_bytes_converted.get(),
- kWidth * kBpp));
-}
-
-TEST(YUVConvertTest, FilterYUVRows_C_OutOfBounds) {
- scoped_array<uint8> src(new uint8[16]);
- scoped_array<uint8> dst(new uint8[16]);
-
- memset(src.get(), 0xff, 16);
- memset(dst.get(), 0, 16);
-
- media::FilterYUVRows_C(dst.get(), src.get(), src.get(), 1, 255);
-
- EXPECT_EQ(255u, dst[0]);
- for (int i = 1; i < 16; ++i) {
- EXPECT_EQ(0u, dst[i]) << " not equal at " << i;
- }
-}
-
-TEST(YUVConvertTest, FilterYUVRows_MMX_OutOfBounds) {
- base::CPU cpu;
- if (!cpu.has_mmx()) {
- LOG(WARNING) << "System not supported. Test skipped.";
- return;
- }
-
- scoped_array<uint8> src(new uint8[16]);
- scoped_array<uint8> dst(new uint8[16]);
-
- memset(src.get(), 0xff, 16);
- memset(dst.get(), 0, 16);
-
- media::FilterYUVRows_MMX(dst.get(), src.get(), src.get(), 1, 255);
- media::EmptyRegisterState();
-
- EXPECT_EQ(255u, dst[0]);
- for (int i = 1; i < 16; ++i) {
- EXPECT_EQ(0u, dst[i]);
- }
-}
-
-TEST(YUVConvertTest, FilterYUVRows_SSE2_OutOfBounds) {
- base::CPU cpu;
- if (!cpu.has_sse2()) {
- LOG(WARNING) << "System not supported. Test skipped.";
- return;
- }
-
- scoped_array<uint8> src(new uint8[16]);
- scoped_array<uint8> dst(new uint8[16]);
-
- memset(src.get(), 0xff, 16);
- memset(dst.get(), 0, 16);
-
- media::FilterYUVRows_SSE2(dst.get(), src.get(), src.get(), 1, 255);
-
- EXPECT_EQ(255u, dst[0]);
- for (int i = 1; i < 16; ++i) {
- EXPECT_EQ(0u, dst[i]);
- }
-}
-
-TEST(YUVConvertTest, FilterYUVRows_MMX_UnalignedDestination) {
- base::CPU cpu;
- if (!cpu.has_mmx()) {
- LOG(WARNING) << "System not supported. Test skipped.";
- return;
- }
-
- const int kSize = 32;
- scoped_array<uint8> src(new uint8[kSize]);
- scoped_array<uint8> dst_sample(new uint8[kSize]);
- scoped_array<uint8> dst(new uint8[kSize]);
-
- memset(dst_sample.get(), 0, kSize);
- memset(dst.get(), 0, kSize);
- for (int i = 0; i < kSize; ++i)
- src[i] = 100 + i;
-
- media::FilterYUVRows_C(dst_sample.get(),
- src.get(), src.get(), 17, 128);
-
- // Generate an unaligned output address.
- uint8* dst_ptr =
- reinterpret_cast<uint8*>(
- (reinterpret_cast<uintptr_t>(dst.get() + 8) & ~7) + 1);
- media::FilterYUVRows_MMX(dst_ptr, src.get(), src.get(), 17, 128);
- media::EmptyRegisterState();
-
- EXPECT_EQ(0, memcmp(dst_sample.get(), dst_ptr, 17));
-}
-
-TEST(YUVConvertTest, FilterYUVRows_SSE2_UnalignedDestination) {
- base::CPU cpu;
- if (!cpu.has_sse2()) {
- LOG(WARNING) << "System not supported. Test skipped.";
- return;
- }
-
- const int kSize = 64;
- scoped_array<uint8> src(new uint8[kSize]);
- scoped_array<uint8> dst_sample(new uint8[kSize]);
- scoped_array<uint8> dst(new uint8[kSize]);
-
- memset(dst_sample.get(), 0, kSize);
- memset(dst.get(), 0, kSize);
- for (int i = 0; i < kSize; ++i)
- src[i] = 100 + i;
-
- media::FilterYUVRows_C(dst_sample.get(),
- src.get(), src.get(), 37, 128);
-
- // Generate an unaligned output address.
- uint8* dst_ptr =
- reinterpret_cast<uint8*>(
- (reinterpret_cast<uintptr_t>(dst.get() + 16) & ~15) + 1);
- media::FilterYUVRows_SSE2(dst_ptr, src.get(), src.get(), 37, 128);
- media::EmptyRegisterState();
-
- EXPECT_EQ(0, memcmp(dst_sample.get(), dst_ptr, 37));
-}
-
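The two unaligned-destination tests above rely on the same pointer trick; a small step-by-step sketch (illustrative only, mirroring the SSE2 variant's 16-byte case):

// Step past the buffer head so rounding down cannot land before the array,
// round down to a 16-byte boundary, then add 1 to guarantee misalignment.
uintptr_t raw = reinterpret_cast<uintptr_t>(dst.get() + 16);
uintptr_t rounded_down = raw & ~static_cast<uintptr_t>(15);  // Multiple of 16.
uint8* unaligned_dst = reinterpret_cast<uint8*>(rounded_down + 1);
// |unaligned_dst| stays inside the 64-byte buffer but is never 16-byte
// aligned, which is exactly the condition FilterYUVRows_SSE2 must tolerate.

The MMX variant does the same with an 8-byte boundary (+ 8 and & ~7).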
-#if defined(ARCH_CPU_X86_64)
-
-TEST(YUVConvertTest, ScaleYUVToRGB32Row_SSE2_X64) {
- scoped_array<uint8> yuv_bytes(new uint8[kYUV12Size]);
- scoped_array<uint8> rgb_bytes_reference(new uint8[kRGBSize]);
- scoped_array<uint8> rgb_bytes_converted(new uint8[kRGBSize]);
- ReadYV12Data(&yuv_bytes);
-
- const int kWidth = 167;
- const int kSourceDx = 80000; // This value means a scale down.
- ScaleYUVToRGB32Row_C(yuv_bytes.get(),
- yuv_bytes.get() + kSourceUOffset,
- yuv_bytes.get() + kSourceVOffset,
- rgb_bytes_reference.get(),
- kWidth,
- kSourceDx);
- ScaleYUVToRGB32Row_SSE2_X64(yuv_bytes.get(),
- yuv_bytes.get() + kSourceUOffset,
- yuv_bytes.get() + kSourceVOffset,
- rgb_bytes_converted.get(),
- kWidth,
- kSourceDx);
- media::EmptyRegisterState();
- EXPECT_EQ(0, memcmp(rgb_bytes_reference.get(),
- rgb_bytes_converted.get(),
- kWidth * kBpp));
-}
-
-TEST(YUVConvertTest, LinearScaleYUVToRGB32Row_MMX_X64) {
- scoped_array<uint8> yuv_bytes(new uint8[kYUV12Size]);
- scoped_array<uint8> rgb_bytes_reference(new uint8[kRGBSize]);
- scoped_array<uint8> rgb_bytes_converted(new uint8[kRGBSize]);
- ReadYV12Data(&yuv_bytes);
-
- const int kWidth = 167;
- const int kSourceDx = 80000; // This value means a scale down.
- LinearScaleYUVToRGB32Row_C(yuv_bytes.get(),
- yuv_bytes.get() + kSourceUOffset,
- yuv_bytes.get() + kSourceVOffset,
- rgb_bytes_reference.get(),
- kWidth,
- kSourceDx);
- LinearScaleYUVToRGB32Row_MMX_X64(yuv_bytes.get(),
- yuv_bytes.get() + kSourceUOffset,
- yuv_bytes.get() + kSourceVOffset,
- rgb_bytes_converted.get(),
- kWidth,
- kSourceDx);
- media::EmptyRegisterState();
- EXPECT_EQ(0, memcmp(rgb_bytes_reference.get(),
- rgb_bytes_converted.get(),
- kWidth * kBpp));
-}
-
-#endif // defined(ARCH_CPU_X86_64)
-
-#endif // defined(ARCH_CPU_X86_FAMILY)
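These unit tests verify each conversion path by hashing the output buffer and comparing against a stored golden value rather than inspecting pixels. As an illustration of that approach, here is a minimal seeded DJB2 hash in the same spirit, assuming the classic hash * 33 + byte recurrence (the name and signature below are illustrative, not the actual media/ helper):

#include <cstddef>
#include <cstdint>

// Seeded DJB2: start from the caller-supplied seed (classically 5381) and
// fold in one byte per step; identical buffers always hash identically, so a
// single stored uint32 golden value is enough to catch any output change.
uint32_t DJB2HashSketch(const uint8_t* data, size_t size, uint32_t seed) {
  uint32_t hash = seed;
  for (size_t i = 0; i < size; ++i)
    hash = hash * 33 + data[i];  // Same as ((hash << 5) + hash) + data[i].
  return hash;
}

The trade-off of hash-based golden values is that a failure only says "something changed", not where; the row-level tests above therefore also memcmp() the SIMD output against the C reference directly.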
diff --git a/src/media/crypto/aes_decryptor.cc b/src/media/crypto/aes_decryptor.cc
deleted file mode 100644
index 5431a76..0000000
--- a/src/media/crypto/aes_decryptor.cc
+++ /dev/null
@@ -1,342 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "media/crypto/aes_decryptor.h"
-
-#include <vector>
-
-#include "base/logging.h"
-#include "base/stl_util.h"
-#include "base/string_number_conversions.h"
-#include "crypto/encryptor.h"
-#include "crypto/symmetric_key.h"
-#include "media/base/audio_decoder_config.h"
-#include "media/base/decoder_buffer.h"
-#include "media/base/decrypt_config.h"
-#include "media/base/decryptor_client.h"
-#include "media/base/video_decoder_config.h"
-#include "media/base/video_frame.h"
-
-namespace media {
-
-uint32 AesDecryptor::next_session_id_ = 1;
-
-enum ClearBytesBufferSel {
- kSrcContainsClearBytes,
- kDstContainsClearBytes
-};
-
-static void CopySubsamples(const std::vector<SubsampleEntry>& subsamples,
- const ClearBytesBufferSel sel,
- const uint8* src,
- uint8* dst) {
- for (size_t i = 0; i < subsamples.size(); i++) {
- const SubsampleEntry& subsample = subsamples[i];
- if (sel == kSrcContainsClearBytes) {
- src += subsample.clear_bytes;
- } else {
- dst += subsample.clear_bytes;
- }
- memcpy(dst, src, subsample.cypher_bytes);
- src += subsample.cypher_bytes;
- dst += subsample.cypher_bytes;
- }
-}
-
-// Decrypts |input| using |key|. Returns a DecoderBuffer with the decrypted
-// data if decryption succeeded or NULL if decryption failed.
-static scoped_refptr<DecoderBuffer> DecryptData(const DecoderBuffer& input,
- crypto::SymmetricKey* key) {
- CHECK(input.GetDataSize());
- CHECK(input.GetDecryptConfig());
- CHECK(key);
-
- crypto::Encryptor encryptor;
- if (!encryptor.Init(key, crypto::Encryptor::CTR, "")) {
- DVLOG(1) << "Could not initialize decryptor.";
- return NULL;
- }
-
- DCHECK_EQ(input.GetDecryptConfig()->iv().size(),
- static_cast<size_t>(DecryptConfig::kDecryptionKeySize));
- if (!encryptor.SetCounter(input.GetDecryptConfig()->iv())) {
- DVLOG(1) << "Could not set counter block.";
- return NULL;
- }
-
- const int data_offset = input.GetDecryptConfig()->data_offset();
- const char* sample =
- reinterpret_cast<const char*>(input.GetData() + data_offset);
- DCHECK_GT(input.GetDataSize(), data_offset);
- size_t sample_size = static_cast<size_t>(input.GetDataSize() - data_offset);
-
- DCHECK_GT(sample_size, 0U) << "No sample data to be decrypted.";
- if (sample_size == 0)
- return NULL;
-
- if (input.GetDecryptConfig()->subsamples().empty()) {
- std::string decrypted_text;
- base::StringPiece encrypted_text(sample, sample_size);
- if (!encryptor.Decrypt(encrypted_text, &decrypted_text)) {
- DVLOG(1) << "Could not decrypt data.";
- return NULL;
- }
-
- // TODO(xhwang): Find a way to avoid this data copy.
- return DecoderBuffer::CopyFrom(
- reinterpret_cast<const uint8*>(decrypted_text.data()),
- decrypted_text.size());
- }
-
- const std::vector<SubsampleEntry>& subsamples =
- input.GetDecryptConfig()->subsamples();
-
- size_t total_clear_size = 0;
- size_t total_encrypted_size = 0;
- for (size_t i = 0; i < subsamples.size(); i++) {
- total_clear_size += subsamples[i].clear_bytes;
- total_encrypted_size += subsamples[i].cypher_bytes;
- // Check for overflow. This check is valid because *_size is unsigned.
- DCHECK(total_clear_size >= subsamples[i].clear_bytes);
- if (total_encrypted_size < subsamples[i].cypher_bytes)
- return NULL;
- }
- size_t total_size = total_clear_size + total_encrypted_size;
- if (total_size < total_clear_size || total_size != sample_size) {
- DVLOG(1) << "Subsample sizes do not equal input size";
- return NULL;
- }
-
- // The encrypted portions of all subsamples must form a contiguous block,
- // such that an encrypted subsample that ends away from a block boundary is
- // immediately followed by the start of the next encrypted subsample. We
- // copy all encrypted subsamples to a contiguous buffer, decrypt them, then
- // copy the decrypted bytes over the encrypted bytes in the output.
- // TODO(strobe): attempt to reduce number of memory copies
- scoped_array<uint8> encrypted_bytes(new uint8[total_encrypted_size]);
- CopySubsamples(subsamples, kSrcContainsClearBytes,
- reinterpret_cast<const uint8*>(sample), encrypted_bytes.get());
-
- base::StringPiece encrypted_text(
- reinterpret_cast<const char*>(encrypted_bytes.get()),
- total_encrypted_size);
- std::string decrypted_text;
- if (!encryptor.Decrypt(encrypted_text, &decrypted_text)) {
- DVLOG(1) << "Could not decrypt data.";
- return NULL;
- }
-
- scoped_refptr<DecoderBuffer> output = DecoderBuffer::CopyFrom(
- reinterpret_cast<const uint8*>(sample), sample_size);
- CopySubsamples(subsamples, kDstContainsClearBytes,
- reinterpret_cast<const uint8*>(decrypted_text.data()),
- output->GetWritableData());
- return output;
-}
-
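To make the gather/scatter step above concrete, here is a minimal sketch with hypothetical subsample sizes; SubsampleEntry, CopySubsamples, and kSrcContainsClearBytes are the ones defined earlier in this file, so the snippet only makes sense as if it lived alongside them:

// Hypothetical layout: a 20-byte sample made of two subsamples.
//   subsample 0: clear_bytes = 4, cypher_bytes = 8
//   subsample 1: clear_bytes = 2, cypher_bytes = 6
// sample: [CCCC EEEEEEEE CC EEEEEE]   (C = clear byte, E = encrypted byte)
std::vector<SubsampleEntry> subsamples(2);
subsamples[0].clear_bytes = 4;  subsamples[0].cypher_bytes = 8;
subsamples[1].clear_bytes = 2;  subsamples[1].cypher_bytes = 6;

const uint8 sample[20] = {};   // Stand-in for the encrypted sample data.
uint8 gathered[8 + 6];         // The encrypted runs, packed contiguously.
CopySubsamples(subsamples, kSrcContainsClearBytes, sample, gathered);
// |gathered| is then decrypted as one contiguous CTR stream; the inverse call
// with kDstContainsClearBytes scatters the plaintext back over the encrypted
// positions of a copy of the original sample, leaving the clear bytes intact.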
-AesDecryptor::AesDecryptor(DecryptorClient* client)
- : client_(client) {
-}
-
-AesDecryptor::~AesDecryptor() {
- STLDeleteValues(&key_map_);
-}
-
-bool AesDecryptor::GenerateKeyRequest(const std::string& key_system,
- const std::string& type,
- const uint8* init_data,
- int init_data_length) {
- std::string session_id_string(base::UintToString(next_session_id_++));
-
- // For now, the AesDecryptor does not care about |key_system| and |type|;
- // just fire the event with the |init_data| as the request.
- std::string message;
- if (init_data && init_data_length) {
- message = std::string(reinterpret_cast<const char*>(init_data),
- init_data_length);
- }
-
- client_->KeyMessage(key_system, session_id_string, message, "");
- return true;
-}
-
-void AesDecryptor::AddKey(const std::string& key_system,
- const uint8* key,
- int key_length,
- const uint8* init_data,
- int init_data_length,
- const std::string& session_id) {
- CHECK(key);
- CHECK_GT(key_length, 0);
-
- // TODO(xhwang): Add |session_id| check after we figure out how:
- // https://www.w3.org/Bugs/Public/show_bug.cgi?id=16550
- if (key_length != DecryptConfig::kDecryptionKeySize) {
- DVLOG(1) << "Invalid key length: " << key_length;
- client_->KeyError(key_system, session_id, Decryptor::kUnknownError, 0);
- return;
- }
-
- // TODO(xhwang): Fix the decryptor to accept no |init_data|. See
- // http://crbug.com/123265. Until then, ensure a non-empty value is passed.
- static const uint8 kDummyInitData[1] = { 0 };
- if (!init_data) {
- init_data = kDummyInitData;
- init_data_length = arraysize(kDummyInitData);
- }
-
- // TODO(xhwang): For now, use |init_data| for key ID. Make this more spec
- // compliant later (http://crbug.com/123262, http://crbug.com/123265).
- std::string key_id_string(reinterpret_cast<const char*>(init_data),
- init_data_length);
-  std::string key_string(reinterpret_cast<const char*>(key), key_length);
- scoped_ptr<DecryptionKey> decryption_key(new DecryptionKey(key_string));
- if (!decryption_key.get()) {
- DVLOG(1) << "Could not create key.";
- client_->KeyError(key_system, session_id, Decryptor::kUnknownError, 0);
- return;
- }
-
- if (!decryption_key->Init()) {
- DVLOG(1) << "Could not initialize decryption key.";
- client_->KeyError(key_system, session_id, Decryptor::kUnknownError, 0);
- return;
- }
-
- SetKey(key_id_string, decryption_key.Pass());
-
- if (!audio_key_added_cb_.is_null())
- audio_key_added_cb_.Run();
-
- if (!video_key_added_cb_.is_null())
- video_key_added_cb_.Run();
-
- client_->KeyAdded(key_system, session_id);
-}
-
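As a usage sketch of the request/response flow implemented above: the client class, the init data, and the key bytes below are hypothetical stand-ins, and AesDecryptor itself ignores |key_system| and |type|.

MyDecryptorClient my_client;            // Hypothetical DecryptorClient impl.
AesDecryptor decryptor(&my_client);

const uint8 init_data[] = { 0x01, 0x02 };  // Container-specific key id blob.
decryptor.GenerateKeyRequest("org.w3.clearkey", "video/webm",
                             init_data, arraysize(init_data));
// GenerateKeyRequest() synchronously fires DecryptorClient::KeyMessage with
// |init_data| echoed back as the message and a freshly generated session id.

// After the application fetches the raw content key out of band:
const std::string session_id = "1";  // In practice, taken from KeyMessage.
const uint8 key[DecryptConfig::kDecryptionKeySize] = {};  // Placeholder key.
decryptor.AddKey("org.w3.clearkey", key, arraysize(key),
                 init_data, arraysize(init_data), session_id);
// On success AddKey() fires KeyAdded plus any registered key-added callbacks.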
-void AesDecryptor::CancelKeyRequest(const std::string& key_system,
- const std::string& session_id) {
-}
-
-void AesDecryptor::RegisterKeyAddedCB(StreamType stream_type,
- const KeyAddedCB& key_added_cb) {
- switch (stream_type) {
- case kAudio:
- audio_key_added_cb_ = key_added_cb;
- break;
- case kVideo:
- video_key_added_cb_ = key_added_cb;
- break;
- default:
- NOTREACHED();
- }
-}
-
-void AesDecryptor::Decrypt(StreamType stream_type,
- const scoped_refptr<DecoderBuffer>& encrypted,
- const DecryptCB& decrypt_cb) {
- CHECK(encrypted->GetDecryptConfig());
- const std::string& key_id = encrypted->GetDecryptConfig()->key_id();
-
- DecryptionKey* key = GetKey(key_id);
- if (!key) {
- DVLOG(1) << "Could not find a matching key for the given key ID.";
- decrypt_cb.Run(kNoKey, NULL);
- return;
- }
-
- scoped_refptr<DecoderBuffer> decrypted;
- // An empty iv string signals that the frame is unencrypted.
- if (encrypted->GetDecryptConfig()->iv().empty()) {
- int data_offset = encrypted->GetDecryptConfig()->data_offset();
- decrypted = DecoderBuffer::CopyFrom(encrypted->GetData() + data_offset,
- encrypted->GetDataSize() - data_offset);
- } else {
- crypto::SymmetricKey* decryption_key = key->decryption_key();
- decrypted = DecryptData(*encrypted, decryption_key);
- if (!decrypted) {
- DVLOG(1) << "Decryption failed.";
- decrypt_cb.Run(kError, NULL);
- return;
- }
- }
-
- decrypted->SetTimestamp(encrypted->GetTimestamp());
- decrypted->SetDuration(encrypted->GetDuration());
- decrypt_cb.Run(kSuccess, decrypted);
-}
-
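Continuing the sketch, the decrypt path itself; the handler is hypothetical, and, as CancelDecrypt() notes just below, Decrypt() runs its callback synchronously before returning.

// Hypothetical completion handler matching Decryptor::DecryptCB.
void OnBufferDecrypted(Decryptor::Status status,
                       const scoped_refptr<DecoderBuffer>& buffer) {
  if (status == Decryptor::kNoKey) {
    // No matching key yet: hook RegisterKeyAddedCB() and retry this buffer
    // once AddKey() has delivered the key.
    return;
  }
  if (status == Decryptor::kSuccess) {
    // |buffer| holds the plaintext and keeps the original timestamp/duration.
  }
}

void DecryptOneBuffer(AesDecryptor* decryptor,
                      const scoped_refptr<DecoderBuffer>& encrypted_buffer) {
  decryptor->Decrypt(Decryptor::kVideo, encrypted_buffer,
                     base::Bind(&OnBufferDecrypted));
}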
-void AesDecryptor::CancelDecrypt(StreamType stream_type) {
- // Decrypt() calls the DecryptCB synchronously so there's nothing to cancel.
-}
-
-void AesDecryptor::InitializeAudioDecoder(scoped_ptr<AudioDecoderConfig> config,
- const DecoderInitCB& init_cb) {
- // AesDecryptor does not support audio decoding.
- init_cb.Run(false);
-}
-
-void AesDecryptor::InitializeVideoDecoder(scoped_ptr<VideoDecoderConfig> config,
- const DecoderInitCB& init_cb) {
- // AesDecryptor does not support video decoding.
- init_cb.Run(false);
-}
-
-void AesDecryptor::DecryptAndDecodeAudio(
- const scoped_refptr<DecoderBuffer>& encrypted,
- const AudioDecodeCB& audio_decode_cb) {
- NOTREACHED() << "AesDecryptor does not support audio decoding";
-}
-
-void AesDecryptor::DecryptAndDecodeVideo(
- const scoped_refptr<DecoderBuffer>& encrypted,
- const VideoDecodeCB& video_decode_cb) {
- NOTREACHED() << "AesDecryptor does not support video decoding";
-}
-
-void AesDecryptor::ResetDecoder(StreamType stream_type) {
- NOTREACHED() << "AesDecryptor does not support audio/video decoding";
-}
-
-void AesDecryptor::DeinitializeDecoder(StreamType stream_type) {
- NOTREACHED() << "AesDecryptor does not support audio/video decoding";
-}
-
-void AesDecryptor::SetKey(const std::string& key_id,
- scoped_ptr<DecryptionKey> decryption_key) {
- base::AutoLock auto_lock(key_map_lock_);
- KeyMap::iterator found = key_map_.find(key_id);
- if (found != key_map_.end()) {
- delete found->second;
- key_map_.erase(found);
- }
- key_map_[key_id] = decryption_key.release();
-}
-
-AesDecryptor::DecryptionKey* AesDecryptor::GetKey(
- const std::string& key_id) const {
- base::AutoLock auto_lock(key_map_lock_);
- KeyMap