Import Cobalt 4.12096
diff --git a/src/cobalt/base/deep_link_event.h b/src/cobalt/base/deep_link_event.h
index 5e3df4b..b5c2afa 100644
--- a/src/cobalt/base/deep_link_event.h
+++ b/src/cobalt/base/deep_link_event.h
@@ -20,6 +20,7 @@
#include <string>
#include "base/compiler_specific.h"
+#include "base/string_util.h"
#include "cobalt/base/event.h"
namespace base {
@@ -28,6 +29,9 @@
public:
explicit DeepLinkEvent(const std::string& link) : link_(link) {}
const std::string& link() const { return link_; }
+ bool IsH5vccLink() const {
+ return StartsWithASCII(link_, "h5vcc", true);
+ }
BASE_EVENT_SUBCLASS(DeepLinkEvent);
diff --git a/src/cobalt/base/user_log.h b/src/cobalt/base/user_log.h
index 4c0b99f..5c47910 100644
--- a/src/cobalt/base/user_log.h
+++ b/src/cobalt/base/user_log.h
@@ -32,6 +32,8 @@
kAppStatusIndex,
kAppSuspendCountIndex,
kAppResumeCountIndex,
+ kAppPauseCountIndex,
+ kAppUnpauseCountIndex,
kNetworkStatusIndex,
kNetworkConnectCountIndex,
kNetworkDisconnectCountIndex,
diff --git a/src/cobalt/browser/application.cc b/src/cobalt/browser/application.cc
index 7559d56..bde2b8f 100644
--- a/src/cobalt/browser/application.cc
+++ b/src/cobalt/browser/application.cc
@@ -244,6 +244,8 @@
Application::AppStatus Application::app_status_ =
Application::kUninitializedAppStatus;
+int Application::app_pause_count_ = 0;
+int Application::app_unpause_count_ = 0;
int Application::app_suspend_count_ = 0;
int Application::app_resume_count_ = 0;
@@ -428,6 +430,10 @@
base::Bind(&Application::OnApplicationEvent, base::Unretained(this));
event_dispatcher_.AddEventCallback(system_window::ApplicationEvent::TypeId(),
application_event_callback_);
+ deep_link_event_callback_ =
+ base::Bind(&Application::OnDeepLinkEvent, base::Unretained(this));
+ event_dispatcher_.AddEventCallback(base::DeepLinkEvent::TypeId(),
+ deep_link_event_callback_);
#if defined(ENABLE_WEBDRIVER)
#if defined(ENABLE_DEBUG_COMMAND_LINE_SWITCHES)
@@ -477,6 +483,8 @@
network_event_callback_);
event_dispatcher_.RemoveEventCallback(
system_window::ApplicationEvent::TypeId(), application_event_callback_);
+ event_dispatcher_.RemoveEventCallback(
+ base::DeepLinkEvent::TypeId(), deep_link_event_callback_);
app_status_ = kShutDownAppStatus;
}
@@ -538,17 +546,34 @@
DLOG(INFO) << "Got quit event.";
app_status_ = kWillQuitAppStatus;
Quit();
+ } else if (app_event->type() == system_window::ApplicationEvent::kPause) {
+ DLOG(INFO) << "Got pause event.";
+ app_status_ = kPausedAppStatus;
+ ++app_pause_count_;
+ } else if (app_event->type() == system_window::ApplicationEvent::kUnpause) {
+ DLOG(INFO) << "Got unpause event.";
+ app_status_ = kRunningAppStatus;
+ ++app_unpause_count_;
} else if (app_event->type() == system_window::ApplicationEvent::kSuspend) {
DLOG(INFO) << "Got suspend event.";
- app_status_ = kPausedAppStatus;
+ app_status_ = kSuspendedAppStatus;
++app_suspend_count_;
} else if (app_event->type() == system_window::ApplicationEvent::kResume) {
DLOG(INFO) << "Got resume event.";
- app_status_ = kRunningAppStatus;
+ app_status_ = kPausedAppStatus;
++app_resume_count_;
}
}
+void Application::OnDeepLinkEvent(const base::Event* event) {
+ const base::DeepLinkEvent* deep_link_event =
+ base::polymorphic_downcast<const base::DeepLinkEvent*>(event);
+ // TODO: Remove this when terminal application states are properly handled.
+ if (deep_link_event->IsH5vccLink()) {
+ browser_module_->Navigate(GURL(deep_link_event->link()));
+ }
+}
+
void Application::WebModuleRecreated() {
#if defined(ENABLE_WEBDRIVER)
if (web_driver_module_) {
@@ -592,6 +617,10 @@
&lifetime_in_ms_, sizeof(lifetime_in_ms_));
base::UserLog::Register(base::UserLog::kAppStatusIndex, "AppStatus",
&app_status_, sizeof(app_status_));
+ base::UserLog::Register(base::UserLog::kAppPauseCountIndex, "PauseCnt",
+ &app_pause_count_, sizeof(app_pause_count_));
+ base::UserLog::Register(base::UserLog::kAppUnpauseCountIndex, "UnpauseCnt",
+ &app_unpause_count_, sizeof(app_unpause_count_));
base::UserLog::Register(base::UserLog::kAppSuspendCountIndex, "SuspendCnt",
&app_suspend_count_, sizeof(app_suspend_count_));
base::UserLog::Register(base::UserLog::kAppResumeCountIndex, "ResumeCnt",
diff --git a/src/cobalt/browser/application.h b/src/cobalt/browser/application.h
index 3b5189a..dc4a362 100644
--- a/src/cobalt/browser/application.h
+++ b/src/cobalt/browser/application.h
@@ -67,6 +67,9 @@
// Called to handle an application event.
void OnApplicationEvent(const base::Event* event);
+ // Called to handle a deep link event.
+ void OnDeepLinkEvent(const base::Event* event);
+
// Called when a navigation occurs in the BrowserModule.
void WebModuleRecreated();
@@ -88,6 +91,7 @@
base::EventCallback account_event_callback_;
base::EventCallback network_event_callback_;
base::EventCallback application_event_callback_;
+ base::EventCallback deep_link_event_callback_;
// Thread checkers to ensure that callbacks for network and application events
// always occur on the same thread.
@@ -113,6 +117,7 @@
kUninitializedAppStatus,
kRunningAppStatus,
kPausedAppStatus,
+ kSuspendedAppStatus,
kWillQuitAppStatus,
kQuitAppStatus,
kShutDownAppStatus,
@@ -156,6 +161,8 @@
static AppStatus app_status_;
static int app_suspend_count_;
static int app_resume_count_;
+ static int app_pause_count_;
+ static int app_unpause_count_;
static NetworkStatus network_status_;
static int network_connect_count_;
diff --git a/src/cobalt/browser/browser.gyp b/src/cobalt/browser/browser.gyp
index 88b34e9..1115bbd 100644
--- a/src/cobalt/browser/browser.gyp
+++ b/src/cobalt/browser/browser.gyp
@@ -35,6 +35,8 @@
'resource_provider_array_buffer_allocator.h',
'splash_screen.cc',
'splash_screen.h',
+ 'storage_upgrade_handler.cc',
+ 'storage_upgrade_handler.h',
'switches.cc',
'switches.h',
'trace_manager.cc',
@@ -117,6 +119,25 @@
},
{
+ 'target_name': 'browser_test',
+ 'type': '<(gtest_target_type)',
+ 'sources': [
+ 'storage_upgrade_handler_test.cc',
+ ],
+ 'dependencies': [
+ '<(DEPTH)/base/base.gyp:run_all_unittests',
+ '<(DEPTH)/cobalt/base/base.gyp:base',
+ '<(DEPTH)/cobalt/dom/dom.gyp:dom',
+ '<(DEPTH)/cobalt/network/network.gyp:network',
+ '<(DEPTH)/cobalt/storage/storage.gyp:storage',
+ '<(DEPTH)/cobalt/storage/storage.gyp:storage_upgrade_copy_test_data',
+ '<(DEPTH)/testing/gmock.gyp:gmock',
+ '<(DEPTH)/testing/gtest.gyp:gtest',
+ 'browser',
+ ],
+ },
+
+ {
'target_name': 'browser_copy_test_data',
'type': 'none',
'actions': [
diff --git a/src/cobalt/browser/browser_module.cc b/src/cobalt/browser/browser_module.cc
index 73de733..621c12a 100644
--- a/src/cobalt/browser/browser_module.cc
+++ b/src/cobalt/browser/browser_module.cc
@@ -31,6 +31,7 @@
#include "cobalt/base/tokens.h"
#include "cobalt/browser/resource_provider_array_buffer_allocator.h"
#include "cobalt/browser/screen_shot_writer.h"
+#include "cobalt/browser/storage_upgrade_handler.h"
#include "cobalt/browser/switches.h"
#include "cobalt/dom/csp_delegate_factory.h"
#include "cobalt/dom/keycode.h"
@@ -121,7 +122,10 @@
ALLOW_THIS_IN_INITIALIZER_LIST(
weak_this_(weak_ptr_factory_.GetWeakPtr())),
self_message_loop_(MessageLoop::current()),
- storage_manager_(options.storage_manager_options),
+ storage_manager_(
+ scoped_ptr<StorageUpgradeHandler>(new StorageUpgradeHandler(url))
+ .PassAs<storage::StorageManager::UpgradeHandler>(),
+ options.storage_manager_options),
#if defined(OS_STARBOARD)
is_rendered_(false),
#endif // OS_STARBOARD
diff --git a/src/cobalt/browser/debug_console/debug_console.js b/src/cobalt/browser/debug_console/debug_console.js
index 58282d5..7c2f8a5 100644
--- a/src/cobalt/browser/debug_console/debug_console.js
+++ b/src/cobalt/browser/debug_console/debug_console.js
@@ -258,6 +258,7 @@
createCommandInput();
createMessageLog();
createDebuggerClient();
+ showHud(false);
showConsole(false);
createConsoleValues();
initDebugCommands();
diff --git a/src/cobalt/browser/starboard/event_handler.cc b/src/cobalt/browser/starboard/event_handler.cc
index be5e494..e244912 100644
--- a/src/cobalt/browser/starboard/event_handler.cc
+++ b/src/cobalt/browser/starboard/event_handler.cc
@@ -54,12 +54,18 @@
void EventHandler::DispatchEvent(const SbEvent* starboard_event) const {
// Create a Cobalt event from the Starboard event, if recognized.
scoped_ptr<base::Event> cobalt_event;
- if (starboard_event->type == kSbEventTypeResume) {
+ if (starboard_event->type == kSbEventTypePause) {
cobalt_event.reset(new system_window::ApplicationEvent(
- system_window::ApplicationEvent::kResume));
+ system_window::ApplicationEvent::kPause));
+ } else if (starboard_event->type == kSbEventTypeUnpause) {
+ cobalt_event.reset(new system_window::ApplicationEvent(
+ system_window::ApplicationEvent::kUnpause));
} else if (starboard_event->type == kSbEventTypeSuspend) {
cobalt_event.reset(new system_window::ApplicationEvent(
system_window::ApplicationEvent::kSuspend));
+ } else if (starboard_event->type == kSbEventTypeResume) {
+ cobalt_event.reset(new system_window::ApplicationEvent(
+ system_window::ApplicationEvent::kResume));
} else if (starboard_event->type == kSbEventTypeNetworkConnect) {
cobalt_event.reset(
new network::NetworkEvent(network::NetworkEvent::kConnection));
diff --git a/src/cobalt/browser/storage_upgrade_handler.cc b/src/cobalt/browser/storage_upgrade_handler.cc
new file mode 100644
index 0000000..b302b91
--- /dev/null
+++ b/src/cobalt/browser/storage_upgrade_handler.cc
@@ -0,0 +1,79 @@
+/*
+ * Copyright 2016 Google Inc. All Rights Reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "cobalt/browser/storage_upgrade_handler.h"
+
+#include <vector>
+
+#include "base/logging.h"
+#include "base/memory/ref_counted.h"
+#include "cobalt/dom/local_storage_database.h"
+#include "cobalt/dom/storage_area.h"
+#include "cobalt/network/persistent_cookie_store.h"
+#include "cobalt/storage/storage_manager.h"
+#include "cobalt/storage/upgrade/upgrade_reader.h"
+#include "net/cookies/canonical_cookie.h"
+
+namespace cobalt {
+namespace browser {
+namespace {
+void OnCookiesLoaded(const std::vector<net::CanonicalCookie*>& cookies) {
+ // We don't care about the local copies of the cookies returned to us.
+ for (size_t i = 0; i < cookies.size(); i++) {
+ delete cookies[i];
+ }
+}
+} // namespace
+
+StorageUpgradeHandler::StorageUpgradeHandler(const GURL& url)
+ : default_local_storage_id_(
+ dom::StorageArea::GetLocalStorageIdForUrl(url)) {}
+
+void StorageUpgradeHandler::OnUpgrade(storage::StorageManager* storage,
+ const char* data, int size) {
+ storage::upgrade::UpgradeReader upgrade_reader(data, size);
+ int num_cookies = upgrade_reader.GetNumCookies();
+ int num_local_storage_entries = upgrade_reader.GetNumLocalStorageEntries();
+ DLOG(INFO) << "Upgrading legacy save data: " << num_cookies << " cookies, "
+ << num_local_storage_entries << " local storage entries.";
+
+ if (num_cookies > 0) {
+ scoped_refptr<network::PersistentCookieStore> cookie_store(
+ new network::PersistentCookieStore(storage));
+ // Load the current cookies to ensure the database table is initialized.
+ cookie_store->Load(base::Bind(OnCookiesLoaded));
+ for (int i = 0; i < num_cookies; i++) {
+ const net::CanonicalCookie* cookie = upgrade_reader.GetCookie(i);
+ DCHECK(cookie);
+ cookie_store->AddCookie(*cookie);
+ }
+ }
+
+ if (num_local_storage_entries > 0) {
+ dom::LocalStorageDatabase local_storage_database(storage);
+ for (int i = 0; i < num_local_storage_entries; i++) {
+ const storage::upgrade::UpgradeReader::LocalStorageEntry*
+ local_storage_entry = upgrade_reader.GetLocalStorageEntry(i);
+ DCHECK(local_storage_entry);
+ local_storage_database.Write(default_local_storage_id_,
+ local_storage_entry->key,
+ local_storage_entry->value);
+ }
+ }
+}
+
+} // namespace browser
+} // namespace cobalt
diff --git a/src/cobalt/browser/storage_upgrade_handler.h b/src/cobalt/browser/storage_upgrade_handler.h
new file mode 100644
index 0000000..15c8962
--- /dev/null
+++ b/src/cobalt/browser/storage_upgrade_handler.h
@@ -0,0 +1,46 @@
+/*
+ * Copyright 2016 Google Inc. All Rights Reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef COBALT_BROWSER_STORAGE_UPGRADE_HANDLER_H_
+#define COBALT_BROWSER_STORAGE_UPGRADE_HANDLER_H_
+
+#include <string>
+
+#include "cobalt/storage/storage_manager.h"
+
+namespace cobalt {
+namespace browser {
+
+// Handles save data in upgrade format.
+class StorageUpgradeHandler : public storage::StorageManager::UpgradeHandler {
+ public:
+ explicit StorageUpgradeHandler(const GURL& gurl);
+
+ void OnUpgrade(storage::StorageManager* storage, const char* data,
+ int size) OVERRIDE;
+
+ const std::string& default_local_storage_id() const {
+ return default_local_storage_id_;
+ }
+
+ private:
+ std::string default_local_storage_id_;
+};
+
+} // namespace browser
+} // namespace cobalt
+
+#endif // COBALT_BROWSER_STORAGE_UPGRADE_HANDLER_H_
diff --git a/src/cobalt/browser/storage_upgrade_handler_test.cc b/src/cobalt/browser/storage_upgrade_handler_test.cc
new file mode 100644
index 0000000..79ec4a5
--- /dev/null
+++ b/src/cobalt/browser/storage_upgrade_handler_test.cc
@@ -0,0 +1,185 @@
+/*
+ * Copyright 2016 Google Inc. All Rights Reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <cstring>
+#include <string>
+#include <vector>
+
+#include "base/base_paths.h"
+#include "base/file_util.h"
+#include "base/logging.h"
+#include "base/memory/scoped_ptr.h"
+#include "base/path_service.h"
+#include "base/platform_file.h"
+#include "cobalt/browser/storage_upgrade_handler.h"
+#include "cobalt/dom/local_storage_database.h"
+#include "cobalt/dom/storage_area.h"
+#include "cobalt/network/persistent_cookie_store.h"
+#include "cobalt/storage/savegame_fake.h"
+#include "cobalt/storage/storage_manager.h"
+#include "cobalt/storage/upgrade/upgrade_reader.h"
+#include "googleurl/src/gurl.h"
+#include "net/cookies/canonical_cookie.h"
+#include "testing/gmock/include/gmock/gmock.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace cobalt {
+namespace browser {
+
+namespace {
+
+class CallbackWaiter {
+ public:
+ CallbackWaiter() : was_called_event_(true, false) {}
+ virtual ~CallbackWaiter() {}
+ bool TimedWait() {
+ return was_called_event_.TimedWait(base::TimeDelta::FromSeconds(5));
+ }
+
+ protected:
+ void Signal() { was_called_event_.Signal(); }
+
+ private:
+ base::WaitableEvent was_called_event_;
+
+ DISALLOW_COPY_AND_ASSIGN(CallbackWaiter);
+};
+
+class FlushWaiter : public CallbackWaiter {
+ public:
+ FlushWaiter() {}
+ void OnFlushDone() { Signal(); }
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(FlushWaiter);
+};
+
+class CookieWaiter : public CallbackWaiter {
+ public:
+ CookieWaiter() {}
+ ~CookieWaiter() {
+ for (size_t i = 0; i < cookies_.size(); i++) {
+ delete cookies_[i];
+ }
+ }
+
+ void OnCookiesLoaded(const std::vector<net::CanonicalCookie*>& cookies) {
+ cookies_ = cookies;
+ Signal();
+ }
+
+ const std::vector<net::CanonicalCookie*>& GetCookies() const {
+ return cookies_;
+ }
+
+ private:
+ std::vector<net::CanonicalCookie*> cookies_;
+ DISALLOW_COPY_AND_ASSIGN(CookieWaiter);
+};
+
+class LocalStorageEntryWaiter : public CallbackWaiter {
+ public:
+ LocalStorageEntryWaiter() {}
+ ~LocalStorageEntryWaiter() {}
+
+ void OnEntriesLoaded(scoped_ptr<dom::StorageArea::StorageMap> entries) {
+ entries_ = entries.Pass();
+ Signal();
+ }
+
+ dom::StorageArea::StorageMap* GetEntries() const { return entries_.get(); }
+
+ private:
+ scoped_ptr<dom::StorageArea::StorageMap> entries_;
+ DISALLOW_COPY_AND_ASSIGN(LocalStorageEntryWaiter);
+};
+
+void ReadFileToString(const char* pathname, std::string* string_out) {
+ EXPECT_TRUE(pathname);
+ EXPECT_TRUE(string_out);
+ FilePath file_path;
+ EXPECT_TRUE(PathService::Get(base::DIR_SOURCE_ROOT, &file_path));
+ file_path = file_path.Append(pathname);
+ EXPECT_TRUE(file_util::ReadFileToString(file_path, string_out));
+ const char* data = string_out->c_str();
+ const int size = static_cast<int>(string_out->length());
+ EXPECT_GT(size, 0);
+ EXPECT_LE(size, 10 * 1024 * 1024);
+ EXPECT_TRUE(storage::upgrade::UpgradeReader::IsUpgradeData(data, size));
+}
+
+int GetNumCookies(storage::StorageManager* storage) {
+ scoped_refptr<network::PersistentCookieStore> cookie_store(
+ new network::PersistentCookieStore(storage));
+ CookieWaiter waiter;
+ cookie_store->Load(
+ base::Bind(&CookieWaiter::OnCookiesLoaded, base::Unretained(&waiter)));
+ EXPECT_EQ(true, waiter.TimedWait());
+ return static_cast<int>(waiter.GetCookies().size());
+}
+
+int GetNumLocalStorageEntries(storage::StorageManager* storage,
+ const std::string& identifier) {
+ dom::LocalStorageDatabase local_storage_database(storage);
+ LocalStorageEntryWaiter waiter;
+ local_storage_database.ReadAll(
+ identifier, base::Bind(&LocalStorageEntryWaiter::OnEntriesLoaded,
+ base::Unretained(&waiter)));
+ EXPECT_EQ(true, waiter.TimedWait());
+ return static_cast<int>(waiter.GetEntries()->size());
+}
+
+} // namespace
+
+TEST(StorageUpgradeHandlerTest, UpgradeFullData) {
+ MessageLoop message_loop_(MessageLoop::TYPE_DEFAULT);
+ std::string file_contents;
+ ReadFileToString("cobalt/storage/upgrade/testdata/full_data_v1.json",
+ &file_contents);
+ StorageUpgradeHandler* upgrade_handler =
+ new StorageUpgradeHandler(GURL("https://www.youtube.com"));
+ storage::StorageManager::Options options;
+ options.savegame_options.delete_on_destruction = true;
+ options.savegame_options.factory = &storage::SavegameFake::Create;
+ storage::StorageManager storage(
+ scoped_ptr<storage::StorageManager::UpgradeHandler>(upgrade_handler),
+ options);
+
+ // Our storage should be empty at this point.
+ EXPECT_EQ(GetNumCookies(&storage), 0);
+ EXPECT_EQ(GetNumLocalStorageEntries(
+ &storage, upgrade_handler->default_local_storage_id()),
+ 0);
+
+ upgrade_handler->OnUpgrade(&storage, file_contents.c_str(),
+ static_cast<int>(file_contents.length()));
+
+ FlushWaiter waiter;
+ storage.FlushNow(
+ base::Bind(&FlushWaiter::OnFlushDone, base::Unretained(&waiter)));
+ EXPECT_EQ(true, waiter.TimedWait());
+
+ // We should now have 2 cookies and 2 local storage entries.
+ EXPECT_EQ(GetNumCookies(&storage), 2);
+ EXPECT_EQ(GetNumLocalStorageEntries(
+ &storage, upgrade_handler->default_local_storage_id()),
+ 2);
+
+ message_loop_.RunUntilIdle();
+}
+
+} // namespace browser
+} // namespace cobalt
diff --git a/src/cobalt/build/build.id b/src/cobalt/build/build.id
index 70cae86..e3f28c7 100644
--- a/src/cobalt/build/build.id
+++ b/src/cobalt/build/build.id
@@ -1 +1 @@
-11837
\ No newline at end of file
+12096
\ No newline at end of file
diff --git a/src/cobalt/cssom/computed_style.cc b/src/cobalt/cssom/computed_style.cc
index 9d5d0c9..c2c1a6e 100644
--- a/src/cobalt/cssom/computed_style.cc
+++ b/src/cobalt/cssom/computed_style.cc
@@ -1688,6 +1688,13 @@
absolute_url = url_value->Resolve(base_url_);
}
+ if (!absolute_url.is_valid()) {
+ DLOG(WARNING) << "Invalid url: " << absolute_url.spec();
+ // No further process is needed if the url is invalid.
+ computed_background_image_ = KeywordValue::GetNone();
+ return;
+ }
+
computed_background_image_ = new AbsoluteURLValue(absolute_url);
}
diff --git a/src/cobalt/dom/local_storage_database_test.cc b/src/cobalt/dom/local_storage_database_test.cc
index 55963ec..6f4352c 100644
--- a/src/cobalt/dom/local_storage_database_test.cc
+++ b/src/cobalt/dom/local_storage_database_test.cc
@@ -83,17 +83,25 @@
return test_path.Append("local_storage_database_test.bin").value();
}
+class DummyUpgradeHandler : public storage::StorageManager::UpgradeHandler {
+ void OnUpgrade(storage::StorageManager* /*storage*/, const char* /*data*/,
+ int /*size*/) OVERRIDE {}
+};
+
class LocalStorageDatabaseTest : public ::testing::Test {
protected:
LocalStorageDatabaseTest()
: message_loop_(MessageLoop::TYPE_DEFAULT),
host_("https://www.example.com") {
+ scoped_ptr<storage::StorageManager::UpgradeHandler> upgrade_handler(
+ new DummyUpgradeHandler());
storage::StorageManager::Options options;
options.savegame_options.path_override = GetSavePath();
options.savegame_options.delete_on_destruction = true;
options.savegame_options.factory = &storage::SavegameFake::Create;
- storage_manager_.reset(new storage::StorageManager(options));
+ storage_manager_.reset(
+ new storage::StorageManager(upgrade_handler.Pass(), options));
db_.reset(new LocalStorageDatabase(storage_manager_.get()));
}
diff --git a/src/cobalt/dom/storage_area.cc b/src/cobalt/dom/storage_area.cc
index 775395f..c5e13f8 100644
--- a/src/cobalt/dom/storage_area.cc
+++ b/src/cobalt/dom/storage_area.cc
@@ -20,7 +20,6 @@
#include "base/stringprintf.h"
#include "cobalt/dom/local_storage_database.h"
#include "cobalt/dom/storage.h"
-#include "googleurl/src/gurl.h"
namespace cobalt {
namespace dom {
@@ -168,5 +167,10 @@
read_event_.Signal();
}
+// static
+std::string StorageArea::GetLocalStorageIdForUrl(const GURL& url) {
+ return OriginToDatabaseIdentifier(url.GetOrigin()) + kLocalStorageSuffix;
+}
+
} // namespace dom
} // namespace cobalt
diff --git a/src/cobalt/dom/storage_area.h b/src/cobalt/dom/storage_area.h
index c85bf67..634215f 100644
--- a/src/cobalt/dom/storage_area.h
+++ b/src/cobalt/dom/storage_area.h
@@ -23,6 +23,7 @@
#include "base/memory/scoped_ptr.h"
#include "base/optional.h"
#include "base/synchronization/waitable_event.h"
+#include "googleurl/src/gurl.h"
namespace cobalt {
namespace dom {
@@ -54,6 +55,8 @@
Storage* storage_node() const { return storage_node_; }
const std::string& identifier() const { return identifier_; }
+ static std::string GetLocalStorageIdForUrl(const GURL& url);
+
private:
void Init();
void OnInitComplete(scoped_ptr<StorageMap> data);
diff --git a/src/cobalt/dom_parser/libxml_html_parser_wrapper.cc b/src/cobalt/dom_parser/libxml_html_parser_wrapper.cc
index 75e760b..968dbe5 100644
--- a/src/cobalt/dom_parser/libxml_html_parser_wrapper.cc
+++ b/src/cobalt/dom_parser/libxml_html_parser_wrapper.cc
@@ -110,7 +110,7 @@
return;
}
- if (CheckInputAndUpdateSeverity(data, size) >= kError) {
+ if (CheckInputAndUpdateSeverity(data, size) == kFatal) {
return;
}
diff --git a/src/cobalt/dom_parser/libxml_parser_wrapper.cc b/src/cobalt/dom_parser/libxml_parser_wrapper.cc
index 67521c2..5e331f7 100644
--- a/src/cobalt/dom_parser/libxml_parser_wrapper.cc
+++ b/src/cobalt/dom_parser/libxml_parser_wrapper.cc
@@ -219,12 +219,15 @@
void LibxmlParserWrapper::OnParsingIssue(IssueSeverity severity,
const std::string& message) {
+ DCHECK(severity >= kWarning && severity <= kFatal);
if (severity > max_severity_) {
max_severity_ = severity;
}
if (severity < LibxmlParserWrapper::kFatal) {
- LOG(WARNING) << message;
+ LOG(WARNING) << "Libxml "
+ << (severity == kWarning ? "Warning: " : "Error: ") << message;
} else {
+ LOG(ERROR) << "Libxml Fatal Error: " << message;
if (!error_callback_.is_null()) {
error_callback_.Run(message);
}
@@ -238,23 +241,23 @@
LibxmlParserWrapper::IssueSeverity
LibxmlParserWrapper::CheckInputAndUpdateSeverity(const char* data,
size_t size) {
- if (max_severity_ >= kError) {
+ if (max_severity_ == kFatal) {
return max_severity_;
}
// Check the total input size.
total_input_size_ += size;
if (total_input_size_ > kMaxTotalInputSize) {
- static const char kErrorTooLong[] = "Parser input is too long.";
- OnParsingIssue(kError, kErrorTooLong);
+ static const char kMessageInputTooLong[] = "Parser input is too long.";
+ OnParsingIssue(kFatal, kMessageInputTooLong);
return max_severity_;
}
// Check the encoding of the input.
if (!IsStringUTF8(std::string(data, size))) {
- static const char kErrorNotUTF8[] =
+ static const char kMessageInputNotUTF8[] =
"Parser input contains non-UTF8 characters.";
- OnParsingIssue(kError, kErrorNotUTF8);
+ OnParsingIssue(kFatal, kMessageInputNotUTF8);
return max_severity_;
}
diff --git a/src/cobalt/dom_parser/libxml_parser_wrapper.h b/src/cobalt/dom_parser/libxml_parser_wrapper.h
index bbeb21a..98d210e 100644
--- a/src/cobalt/dom_parser/libxml_parser_wrapper.h
+++ b/src/cobalt/dom_parser/libxml_parser_wrapper.h
@@ -61,9 +61,9 @@
public:
enum IssueSeverity {
kNoIssue,
- kWarning,
- kError,
- kFatal,
+ kWarning, // A simple warning
+ kError, // A recoverable error
+ kFatal, // A fatal error
kIssueSeverityCount,
};
@@ -137,8 +137,9 @@
}
private:
- // Maximum total input size, 1MB.
- static const size_t kMaxTotalInputSize = 1 * 1024 * 1024;
+ // Maximum total input size, as specified in Libxml's value
+ // XML_MAX_TEXT_LENGTH in parserInternals.h.
+ static const size_t kMaxTotalInputSize = 10000000;
const scoped_refptr<dom::Document> document_;
const scoped_refptr<dom::Node> parent_node_;
diff --git a/src/cobalt/dom_parser/libxml_xml_parser_wrapper.cc b/src/cobalt/dom_parser/libxml_xml_parser_wrapper.cc
index c933f5e..787970d 100644
--- a/src/cobalt/dom_parser/libxml_xml_parser_wrapper.cc
+++ b/src/cobalt/dom_parser/libxml_xml_parser_wrapper.cc
@@ -79,7 +79,7 @@
return;
}
- if (CheckInputAndUpdateSeverity(data, size) >= kError) {
+ if (CheckInputAndUpdateSeverity(data, size) == kFatal) {
return;
}
diff --git a/src/cobalt/h5vcc/h5vcc_runtime.cc b/src/cobalt/h5vcc/h5vcc_runtime.cc
index 8b0f871..43d71cc 100644
--- a/src/cobalt/h5vcc/h5vcc_runtime.cc
+++ b/src/cobalt/h5vcc/h5vcc_runtime.cc
@@ -14,9 +14,10 @@
* limitations under the License.
*/
+#include "cobalt/h5vcc/h5vcc_runtime.h"
+
#include "cobalt/base/deep_link_event.h"
#include "cobalt/base/polymorphic_downcast.h"
-#include "cobalt/h5vcc/h5vcc_runtime.h"
#include "cobalt/system_window/application_event.h"
namespace cobalt {
@@ -67,11 +68,11 @@
void H5vccRuntime::OnApplicationEvent(const base::Event* event) {
const system_window::ApplicationEvent* app_event =
base::polymorphic_downcast<const system_window::ApplicationEvent*>(event);
- if (app_event->type() == system_window::ApplicationEvent::kSuspend) {
+ if (app_event->type() == system_window::ApplicationEvent::kPause) {
DLOG(INFO) << "Got pause event.";
on_pause()->DispatchEvent();
- } else if (app_event->type() == system_window::ApplicationEvent::kResume) {
- DLOG(INFO) << "Got resume event.";
+ } else if (app_event->type() == system_window::ApplicationEvent::kUnpause) {
+ DLOG(INFO) << "Got unpause event.";
on_resume()->DispatchEvent();
}
}
@@ -79,8 +80,10 @@
void H5vccRuntime::OnDeepLinkEvent(const base::Event* event) {
const base::DeepLinkEvent* deep_link_event =
base::polymorphic_downcast<const base::DeepLinkEvent*>(event);
- DLOG(INFO) << "Got deep link event: " << deep_link_event->link();
- on_deep_link()->DispatchEvent(deep_link_event->link());
+ if (!deep_link_event->IsH5vccLink()) {
+ DLOG(INFO) << "Got deep link event: " << deep_link_event->link();
+ on_deep_link()->DispatchEvent(deep_link_event->link());
+ }
}
} // namespace h5vcc
} // namespace cobalt
diff --git a/src/cobalt/network/persistent_cookie_store_test.cc b/src/cobalt/network/persistent_cookie_store_test.cc
index 7d2762b..77285d0 100644
--- a/src/cobalt/network/persistent_cookie_store_test.cc
+++ b/src/cobalt/network/persistent_cookie_store_test.cc
@@ -120,6 +120,11 @@
DISALLOW_COPY_AND_ASSIGN(CookieVerifier);
};
+class DummyUpgradeHandler : public storage::StorageManager::UpgradeHandler {
+ void OnUpgrade(storage::StorageManager* /*storage*/, const char* /*data*/,
+ int /*size*/) OVERRIDE {}
+};
+
std::string GetSavePath() {
FilePath test_path;
CHECK(PathService::Get(paths::DIR_COBALT_TEST_OUT, &test_path));
@@ -129,12 +134,15 @@
class PersistentCookieStoreTest : public ::testing::Test {
protected:
PersistentCookieStoreTest() : message_loop_(MessageLoop::TYPE_DEFAULT) {
+ scoped_ptr<storage::StorageManager::UpgradeHandler> upgrade_handler(
+ new DummyUpgradeHandler());
storage::StorageManager::Options options;
options.savegame_options.path_override = GetSavePath();
options.savegame_options.delete_on_destruction = true;
options.savegame_options.factory = &storage::SavegameFake::Create;
- storage_manager_.reset(new storage::StorageManager(options));
+ storage_manager_.reset(
+ new storage::StorageManager(upgrade_handler.Pass(), options));
cookie_store_ = new PersistentCookieStore(storage_manager_.get());
}
diff --git a/src/cobalt/renderer/backend/blitter/graphics_context.cc b/src/cobalt/renderer/backend/blitter/graphics_context.cc
index b6e4210..3e9613f 100644
--- a/src/cobalt/renderer/backend/blitter/graphics_context.cc
+++ b/src/cobalt/renderer/backend/blitter/graphics_context.cc
@@ -75,6 +75,12 @@
return pixels.Pass();
}
+void GraphicsContextBlitter::Finish() {
+ // Note: flushing the context doesn't actually guarantee that drawing has
+ // finished.
+ SbBlitterFlushContext(context_);
+}
+
} // namespace backend
} // namespace renderer
} // namespace cobalt
diff --git a/src/cobalt/renderer/backend/blitter/graphics_context.h b/src/cobalt/renderer/backend/blitter/graphics_context.h
index 5c15184..5fc3330 100644
--- a/src/cobalt/renderer/backend/blitter/graphics_context.h
+++ b/src/cobalt/renderer/backend/blitter/graphics_context.h
@@ -38,6 +38,7 @@
const math::Size& dimensions) OVERRIDE;
scoped_array<uint8_t> DownloadPixelDataAsRGBA(
const scoped_refptr<RenderTarget>& render_target) OVERRIDE;
+ void Finish() OVERRIDE;
SbBlitterContext GetSbBlitterContext() const { return context_; }
SbBlitterDevice GetSbBlitterDevice() const { return device_; }
diff --git a/src/cobalt/renderer/backend/egl/graphics_context.cc b/src/cobalt/renderer/backend/egl/graphics_context.cc
index 3482472..2f39ddb 100644
--- a/src/cobalt/renderer/backend/egl/graphics_context.cc
+++ b/src/cobalt/renderer/backend/egl/graphics_context.cc
@@ -211,6 +211,7 @@
GraphicsContextEGL::~GraphicsContextEGL() {
MakeCurrent();
+ GL_CALL(glFinish());
GL_CALL(glDeleteBuffers(1, &blit_vertex_buffer_));
GL_CALL(glDeleteProgram(blit_program_));
GL_CALL(glDeleteShader(blit_fragment_shader_));
@@ -350,6 +351,11 @@
return pixels.Pass();
}
+void GraphicsContextEGL::Finish() {
+ ScopedMakeCurrent scoped_current_context(this);
+ GL_CALL(glFinish());
+}
+
void GraphicsContextEGL::Blit(GLuint texture, int x, int y, int width,
int height) {
// Render a texture to the specified output rectangle on the render target.
diff --git a/src/cobalt/renderer/backend/egl/graphics_context.h b/src/cobalt/renderer/backend/egl/graphics_context.h
index 57d77aa..c69ed50 100644
--- a/src/cobalt/renderer/backend/egl/graphics_context.h
+++ b/src/cobalt/renderer/backend/egl/graphics_context.h
@@ -58,6 +58,8 @@
scoped_array<uint8_t> DownloadPixelDataAsRGBA(
const scoped_refptr<RenderTarget>& render_target) OVERRIDE;
+ void Finish() OVERRIDE;
+
// Helper class to allow one to create a RAII object that will acquire the
// current context upon construction and release it upon destruction.
class ScopedMakeCurrent {
@@ -106,6 +108,13 @@
void Blit(GLuint texture, int x, int y, int width, int height);
+ bool ReadPixelsNeedVerticalFlip() {
+ if (!read_pixels_needs_vertical_flip_) {
+ read_pixels_needs_vertical_flip_ = ComputeReadPixelsNeedVerticalFlip();
+ }
+ return *read_pixels_needs_vertical_flip_;
+ }
+
private:
// Performs a test to determine if the pixel data returned by glReadPixels
// needs to be vertically flipped or not. This test is expensive and so
diff --git a/src/cobalt/renderer/backend/graphics_context.h b/src/cobalt/renderer/backend/graphics_context.h
index e5743f5..bd51d53 100644
--- a/src/cobalt/renderer/backend/graphics_context.h
+++ b/src/cobalt/renderer/backend/graphics_context.h
@@ -62,6 +62,9 @@
virtual scoped_array<uint8_t> DownloadPixelDataAsRGBA(
const scoped_refptr<RenderTarget>& render_target) = 0;
+ // Waits until all drawing is finished.
+ virtual void Finish() = 0;
+
private:
GraphicsSystem* system_;
};
diff --git a/src/cobalt/renderer/backend/graphics_context_stub.h b/src/cobalt/renderer/backend/graphics_context_stub.h
index 5cb36bd..b2e0f1b 100644
--- a/src/cobalt/renderer/backend/graphics_context_stub.h
+++ b/src/cobalt/renderer/backend/graphics_context_stub.h
@@ -45,6 +45,8 @@
return scoped_array<uint8_t>(
new uint8_t[render_target->GetSize().GetArea() * 4]);
}
+
+ void Finish() OVERRIDE {}
};
} // namespace backend
diff --git a/src/cobalt/renderer/glimp_shaders/glsl/shaders.gypi b/src/cobalt/renderer/glimp_shaders/glsl/shaders.gypi
index b0e8820..7500fc3 100644
--- a/src/cobalt/renderer/glimp_shaders/glsl/shaders.gypi
+++ b/src/cobalt/renderer/glimp_shaders/glsl/shaders.gypi
@@ -64,6 +64,7 @@
'vertex_skia_antialiased_color_only.glsl',
'vertex_skia_antialiased_oval.glsl',
'vertex_skia_color_only.glsl',
+ 'vertex_mesh.glsl',
'vertex_skia_texcoords_and_color.glsl',
'vertex_skia_texcoords_and_color_with_texcoord_matrix.glsl',
'vertex_skia_texcoords_derived_from_position.glsl',
diff --git a/src/cobalt/renderer/glimp_shaders/glsl/vertex_mesh.glsl b/src/cobalt/renderer/glimp_shaders/glsl/vertex_mesh.glsl
new file mode 100644
index 0000000..cce9ab8
--- /dev/null
+++ b/src/cobalt/renderer/glimp_shaders/glsl/vertex_mesh.glsl
@@ -0,0 +1,8 @@
+attribute vec3 a_position;
+attribute vec2 a_tex_coord;
+varying vec2 v_tex_coord;
+uniform mat4 u_mvp_matrix;
+void main() {
+ gl_Position = u_mvp_matrix * vec4(a_position.xyz, 1.0);
+ v_tex_coord = a_tex_coord;
+}
diff --git a/src/cobalt/renderer/rasterizer/benchmark.cc b/src/cobalt/renderer/rasterizer/benchmark.cc
index 097d0bf..5507cb7 100644
--- a/src/cobalt/renderer/rasterizer/benchmark.cc
+++ b/src/cobalt/renderer/rasterizer/benchmark.cc
@@ -121,6 +121,7 @@
// Submit the render tree to be rendered.
rasterizer->Submit(animated, test_surface);
+ graphics_context->Finish();
if (i == 0) {
// Enable tracing again after one iteration has passed and any lazy
@@ -183,6 +184,10 @@
CreateDefaultRasterizer(graphics_context.get());
ResourceProvider* resource_provider = rasterizer->GetResourceProvider();
+ if (!resource_provider->AlphaFormatSupported(alpha_format)) {
+ // Only run the test if the alpha format is supported.
+ return;
+ }
const int kIterationCount = 20;
const Size kImageSize(400, 400);
diff --git a/src/cobalt/renderer/rasterizer/skia/font.cc b/src/cobalt/renderer/rasterizer/skia/font.cc
index 06bbb20..7f357b9 100644
--- a/src/cobalt/renderer/rasterizer/skia/font.cc
+++ b/src/cobalt/renderer/rasterizer/skia/font.cc
@@ -117,7 +117,8 @@
paint.setTextEncoding(SkPaint::kGlyphID_TextEncoding);
SkRect skia_bounds;
- float width = paint.measureText(&glyph, 2, &skia_bounds);
+ float width =
+ paint.measureText(&glyph, sizeof(render_tree::GlyphIndex), &skia_bounds);
// Both cache and return the glyph's bounds.
if (glyph < kPrimaryPageSize) {
diff --git a/src/cobalt/script/mozjs/mozjs_global_environment.cc b/src/cobalt/script/mozjs/mozjs_global_environment.cc
index 9a79991..1cf31fd 100644
--- a/src/cobalt/script/mozjs/mozjs_global_environment.cc
+++ b/src/cobalt/script/mozjs/mozjs_global_environment.cc
@@ -426,7 +426,9 @@
if (global_object_proxy && global_object_proxy->last_error_message_) {
*(global_object_proxy->last_error_message_) = error_message;
} else {
- DLOG(ERROR) << "JS Error: " << error_message;
+ const char *filename = report->filename ? report->filename : "(none)";
+ LOG(ERROR) << "JS Error: " << filename << ":" << report->lineno << ":"
+ << report->column << ": " << error_message;
}
}
diff --git a/src/cobalt/speech/audio_encoder_flac.cc b/src/cobalt/speech/audio_encoder_flac.cc
new file mode 100644
index 0000000..d4403e1
--- /dev/null
+++ b/src/cobalt/speech/audio_encoder_flac.cc
@@ -0,0 +1,125 @@
+/*
+ * Copyright 2016 Google Inc. All Rights Reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "cobalt/speech/audio_encoder_flac.h"
+
+#include "base/logging.h"
+#include "base/memory/scoped_ptr.h"
+#include "base/string_number_conversions.h"
+
+namespace cobalt {
+namespace speech {
+
+namespace {
+const char kContentTypeFLAC[] = "audio/x-flac; rate=";
+const int kFLACCompressionLevel = 0; // 0 for speed
+const int kBitsPerSample = 16;
+const float kMaxInt16AsFloat32 = 32767.0f;
+} // namespace
+
+AudioEncoderFlac::AudioEncoderFlac(int sample_rate)
+ : encoder_(FLAC__stream_encoder_new()) {
+ DCHECK(encoder_);
+
+ // Set the number of channels to be encoded.
+ FLAC__stream_encoder_set_channels(encoder_, 1);
+ // Set the sample resolution of the input to be encoded.
+ FLAC__stream_encoder_set_bits_per_sample(encoder_, kBitsPerSample);
+ // Set the sample rate (in Hz) of the input to be encoded.
+ FLAC__stream_encoder_set_sample_rate(encoder_,
+ static_cast<uint32>(sample_rate));
+ // Set the compression level. A higher level usually means more computation
+ // but higher compression.
+ FLAC__stream_encoder_set_compression_level(encoder_, kFLACCompressionLevel);
+
+ // Initialize the encoder instance to encode native FLAC stream.
+ FLAC__StreamEncoderInitStatus encoder_status =
+ FLAC__stream_encoder_init_stream(encoder_, WriteCallback, NULL, NULL,
+ NULL, this);
+ DCHECK_EQ(encoder_status, FLAC__STREAM_ENCODER_INIT_STATUS_OK);
+}
+
+AudioEncoderFlac::~AudioEncoderFlac() {
+ DCHECK(thread_checker_.CalledOnValidThread());
+ FLAC__stream_encoder_delete(encoder_);
+}
+
+void AudioEncoderFlac::Encode(const AudioBus* audio_bus) {
+ DCHECK(thread_checker_.CalledOnValidThread());
+
+ DCHECK_EQ(audio_bus->channels(), 1);
+ const float* audio_data = audio_bus->channel(0);
+ uint32 frames = static_cast<uint32>(audio_bus->frames());
+ scoped_array<FLAC__int32> flac_samples(new FLAC__int32[frames]);
+ for (uint32 i = 0; i < frames; ++i) {
+ flac_samples[i] =
+ static_cast<FLAC__int32>(audio_data[i] * kMaxInt16AsFloat32);
+ }
+
+ FLAC__int32* flac_samples_ptr = flac_samples.get();
+ // Submit data for encoding.
+ FLAC__bool success =
+ FLAC__stream_encoder_process(encoder_, &flac_samples_ptr, frames);
+ DCHECK(success);
+}
+
+void AudioEncoderFlac::Finish() {
+ DCHECK(thread_checker_.CalledOnValidThread());
+
+ // Finish the encoding. It causes the encoder to encode any data still in
+ // its input pipe, and finally reset the encoder to the uninitialized state.
+ FLAC__stream_encoder_finish(encoder_);
+}
+
+std::string AudioEncoderFlac::GetMimeType() const {
+ DCHECK(thread_checker_.CalledOnValidThread());
+
+ return std::string(kContentTypeFLAC) +
+ base::UintToString(FLAC__stream_encoder_get_sample_rate(encoder_));
+}
+
+std::string AudioEncoderFlac::GetAndClearAvailableEncodedData() {
+ DCHECK(thread_checker_.CalledOnValidThread());
+
+ std::string result = encoded_data_;
+ encoded_data_.clear();
+ return result;
+}
+
+// A write callback which will be called any time there is raw encoded data to
+// write. The call to FLAC__stream_encoder_init_stream() currently will also
+// immediately call the write callback several times, once with the FLAC
+// signature, and once for each encoded metadata block.
+FLAC__StreamEncoderWriteStatus AudioEncoderFlac::WriteCallback(
+ const FLAC__StreamEncoder* encoder, const FLAC__byte buffer[], size_t bytes,
+ unsigned int samples, unsigned int current_frame, void* client_data) {
+ UNREFERENCED_PARAMETER(encoder);
+ UNREFERENCED_PARAMETER(samples);
+ UNREFERENCED_PARAMETER(current_frame);
+
+ AudioEncoderFlac* audio_encoder =
+ reinterpret_cast<AudioEncoderFlac*>(client_data);
+ DCHECK(audio_encoder);
+ DCHECK(audio_encoder->thread_checker_.CalledOnValidThread());
+
+ audio_encoder->encoded_data_.append(reinterpret_cast<const char*>(buffer),
+ bytes);
+
+ return FLAC__STREAM_ENCODER_WRITE_STATUS_OK;
+}
+
+} // namespace speech
+} // namespace cobalt
diff --git a/src/cobalt/speech/audio_encoder_flac.h b/src/cobalt/speech/audio_encoder_flac.h
new file mode 100644
index 0000000..be993ca
--- /dev/null
+++ b/src/cobalt/speech/audio_encoder_flac.h
@@ -0,0 +1,66 @@
+/*
+ * Copyright 2016 Google Inc. All Rights Reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef COBALT_SPEECH_AUDIO_ENCODER_FLAC_H_
+#define COBALT_SPEECH_AUDIO_ENCODER_FLAC_H_
+
+#include <string>
+
+#include "base/basictypes.h"
+#include "base/callback.h"
+#include "base/threading/thread_checker.h"
+#include "media/base/audio_bus.h"
+#include "third_party/flac/include/FLAC/stream_encoder.h"
+
+namespace cobalt {
+namespace speech {
+
+// Encodes raw audio using the FLAC codec.
+class AudioEncoderFlac {
+ public:
+ typedef ::media::AudioBus AudioBus;
+
+ explicit AudioEncoderFlac(int sample_rate);
+ ~AudioEncoderFlac();
+
+ // Encode raw audio data.
+ void Encode(const AudioBus* audio_bus);
+ // Finish encoding.
+ void Finish();
+
+ // Returns the MIME type of the encoded audio data.
+ std::string GetMimeType() const;
+ // Returns and clears the available encoded audio data.
+ std::string GetAndClearAvailableEncodedData();
+
+ private:
+ // FLAC encoder callback.
+ static FLAC__StreamEncoderWriteStatus WriteCallback(
+ const FLAC__StreamEncoder* encoder, const FLAC__byte buffer[],
+ size_t bytes, unsigned int samples, unsigned int current_frame,
+ void* client_data);
+
+ base::ThreadChecker thread_checker_;
+ // FLAC encoder.
+ FLAC__StreamEncoder* encoder_;
+ // Cached encoded data.
+ std::string encoded_data_;
+};
+
+} // namespace speech
+} // namespace cobalt
+
+#endif // COBALT_SPEECH_AUDIO_ENCODER_FLAC_H_
diff --git a/src/cobalt/speech/speech.gyp b/src/cobalt/speech/speech.gyp
index 83960c6..5a43669 100644
--- a/src/cobalt/speech/speech.gyp
+++ b/src/cobalt/speech/speech.gyp
@@ -21,6 +21,8 @@
'target_name': 'speech',
'type': 'static_library',
'sources': [
+ 'audio_encoder_flac.cc',
+ 'audio_encoder_flac.h',
'mic.h',
'speech_recognition.cc',
'speech_recognition.h',
@@ -43,6 +45,7 @@
'dependencies': [
'<(DEPTH)/cobalt/base/base.gyp:base',
'<(DEPTH)/cobalt/dom/dom.gyp:dom',
+ '<(DEPTH)/third_party/flac/flac.gyp:libflac',
],
'conditions': [
['OS=="starboard"', {
diff --git a/src/cobalt/speech/speech_recognition.cc b/src/cobalt/speech/speech_recognition.cc
index 90383ae..e1155d6 100644
--- a/src/cobalt/speech/speech_recognition.cc
+++ b/src/cobalt/speech/speech_recognition.cc
@@ -28,7 +28,10 @@
SpeechRecognition::SpeechRecognition(script::EnvironmentSettings* settings)
: ALLOW_THIS_IN_INITIALIZER_LIST(
manager_(base::polymorphic_downcast<dom::DOMSettings*>(settings)
- ->fetcher_factory())),
+ ->fetcher_factory()
+ ->network_module(),
+ base::Bind(&SpeechRecognition::OnEventAvailable,
+ base::Unretained(this)))),
config_("" /*lang*/, false /*continuous*/, false /*interim_results*/,
1 /*max alternatives*/) {}
@@ -38,5 +41,10 @@
void SpeechRecognition::Abort() { NOTIMPLEMENTED(); }
+bool SpeechRecognition::OnEventAvailable(
+ const scoped_refptr<dom::Event>& event) {
+ return DispatchEvent(event);
+}
+
} // namespace speech
} // namespace cobalt
diff --git a/src/cobalt/speech/speech_recognition.h b/src/cobalt/speech/speech_recognition.h
index 4622a9c..9a24a85 100644
--- a/src/cobalt/speech/speech_recognition.h
+++ b/src/cobalt/speech/speech_recognition.h
@@ -107,6 +107,9 @@
private:
~SpeechRecognition() OVERRIDE {}
+ // Callback from recognition manager.
+ bool OnEventAvailable(const scoped_refptr<dom::Event>& event);
+
// Handles main operations of speech recognition including audio encoding,
// mic audio retrieving and audio data recognizing.
SpeechRecognitionManager manager_;
diff --git a/src/cobalt/speech/speech_recognition_manager.cc b/src/cobalt/speech/speech_recognition_manager.cc
index 61b50c7..6f6966e 100644
--- a/src/cobalt/speech/speech_recognition_manager.cc
+++ b/src/cobalt/speech/speech_recognition_manager.cc
@@ -23,20 +23,27 @@
namespace {
const int kSampleRate = 16000;
+const float kAudioPacketDurationInSeconds = 0.1f;
} // namespace
SpeechRecognitionManager::SpeechRecognitionManager(
- loader::FetcherFactory* fetcher_factory)
+ network::NetworkModule* network_module, const EventCallback& event_callback)
: ALLOW_THIS_IN_INITIALIZER_LIST(weak_ptr_factory_(this)),
weak_this_(weak_ptr_factory_.GetWeakPtr()),
main_message_loop_(base::MessageLoopProxy::current()),
- recognizer_(fetcher_factory),
+ event_callback_(event_callback),
+ ALLOW_THIS_IN_INITIALIZER_LIST(
+ recognizer_(network_module,
+ base::Bind(&SpeechRecognitionManager::OnRecognizerResult,
+ base::Unretained(this)),
+ base::Bind(&SpeechRecognitionManager::OnRecognizerError,
+ base::Unretained(this)))),
ALLOW_THIS_IN_INITIALIZER_LIST(mic_(Mic::Create(
kSampleRate, base::Bind(&SpeechRecognitionManager::OnDataReceived,
base::Unretained(this)),
base::Bind(&SpeechRecognitionManager::OnDataCompletion,
base::Unretained(this)),
- base::Bind(&SpeechRecognitionManager::OnError,
+ base::Bind(&SpeechRecognitionManager::OnMicError,
base::Unretained(this))))) {}
SpeechRecognitionManager::~SpeechRecognitionManager() { Stop(); }
@@ -44,7 +51,7 @@
void SpeechRecognitionManager::Start(const SpeechRecognitionConfig& config) {
DCHECK(main_message_loop_->BelongsToCurrentThread());
- recognizer_.Start(config);
+ recognizer_.Start(config, kSampleRate);
mic_->Start();
}
@@ -64,9 +71,7 @@
return;
}
- // TODO: Encode audio data, and then send it to recognizer. After
- // receiving the recognition result from recognizer, fire a speech recognition
- // event.
+ recognizer_.RecognizeAudio(audio_bus.Pass(), false);
}
void SpeechRecognitionManager::OnDataCompletion() {
@@ -78,19 +83,56 @@
return;
}
- // TODO: Handle the case that no audio data would be received
- // afterwards.
+ // The encoder requires a non-empty final buffer, so encode a packet of
+ // silence at the end in case the encoder has no remaining data.
+ size_t dummy_frames =
+ static_cast<size_t>(kSampleRate * kAudioPacketDurationInSeconds);
+ scoped_ptr<AudioBus> dummy_audio_bus =
+ AudioBus::Create(1, static_cast<int>(dummy_frames));
+ memset(dummy_audio_bus->channel(0), 0, dummy_frames);
+ recognizer_.RecognizeAudio(dummy_audio_bus.Pass(), true);
}
-void SpeechRecognitionManager::OnError() {
+void SpeechRecognitionManager::OnRecognizerResult(
+ const scoped_refptr<SpeechRecognitionEvent>& event) {
if (!main_message_loop_->BelongsToCurrentThread()) {
- // Called from mic thread.
+ // Called from recognizer thread.
main_message_loop_->PostTask(
- FROM_HERE, base::Bind(&SpeechRecognitionManager::OnError, weak_this_));
+ FROM_HERE, base::Bind(&SpeechRecognitionManager::OnRecognizerResult,
+ weak_this_, event));
return;
}
- // TODO: Handle the case that an error occurred.
+ event_callback_.Run(event);
+}
+
+void SpeechRecognitionManager::OnRecognizerError() {
+ if (!main_message_loop_->BelongsToCurrentThread()) {
+ // Called from recognizer thread.
+ main_message_loop_->PostTask(
+ FROM_HERE,
+ base::Bind(&SpeechRecognitionManager::OnRecognizerError, weak_this_));
+ return;
+ }
+
+ // TODO: Could be other error types based on the recognizer response.
+ event_callback_.Run(
+ scoped_refptr<SpeechRecognitionError>(new SpeechRecognitionError(
+ SpeechRecognitionError::kNetwork, "Recognition Failed.")));
+}
+
+void SpeechRecognitionManager::OnMicError() {
+ if (!main_message_loop_->BelongsToCurrentThread()) {
+ // Called from mic thread.
+ main_message_loop_->PostTask(
+ FROM_HERE,
+ base::Bind(&SpeechRecognitionManager::OnMicError, weak_this_));
+ return;
+ }
+
+ event_callback_.Run(
+ scoped_refptr<SpeechRecognitionError>(new SpeechRecognitionError(
+ SpeechRecognitionError::kAborted, "Mic Disconnected.")));
}
} // namespace speech
diff --git a/src/cobalt/speech/speech_recognition_manager.h b/src/cobalt/speech/speech_recognition_manager.h
index 022d82c..dda56b2 100644
--- a/src/cobalt/speech/speech_recognition_manager.h
+++ b/src/cobalt/speech/speech_recognition_manager.h
@@ -19,9 +19,11 @@
#include <string>
-#include "cobalt/loader/fetcher_factory.h"
+#include "cobalt/network/network_module.h"
#include "cobalt/speech/mic.h"
#include "cobalt/speech/speech_recognition_config.h"
+#include "cobalt/speech/speech_recognition_error.h"
+#include "cobalt/speech/speech_recognition_event.h"
#include "cobalt/speech/speech_recognizer.h"
namespace cobalt {
@@ -35,8 +37,10 @@
class SpeechRecognitionManager {
public:
typedef ::media::AudioBus AudioBus;
+ typedef base::Callback<bool(const scoped_refptr<dom::Event>&)> EventCallback;
- explicit SpeechRecognitionManager(loader::FetcherFactory* fetcher_factory);
+ SpeechRecognitionManager(network::NetworkModule* network_module,
+ const EventCallback& event_callback);
~SpeechRecognitionManager();
// Start/Stop speech recognizer and microphone. Multiple calls would be
@@ -48,14 +52,20 @@
// Callbacks from mic.
void OnDataReceived(scoped_ptr<AudioBus> audio_bus);
void OnDataCompletion();
- void OnError();
+ void OnMicError();
+
+ // Callbacks from recognizer.
+ void OnRecognizerResult(const scoped_refptr<SpeechRecognitionEvent>& event);
+ void OnRecognizerError();
base::WeakPtrFactory<SpeechRecognitionManager> weak_ptr_factory_;
// We construct a WeakPtr upon SpeechRecognitionManager's construction in
// order to associate the WeakPtr with the constructing thread.
base::WeakPtr<SpeechRecognitionManager> weak_this_;
-
scoped_refptr<base::MessageLoopProxy> const main_message_loop_;
+
+ // Callback for sending dom events if available.
+ EventCallback event_callback_;
SpeechRecognizer recognizer_;
scoped_ptr<Mic> mic_;
};
diff --git a/src/cobalt/speech/speech_recognizer.cc b/src/cobalt/speech/speech_recognizer.cc
index 6355ae7..f46df90 100644
--- a/src/cobalt/speech/speech_recognizer.cc
+++ b/src/cobalt/speech/speech_recognizer.cc
@@ -17,14 +17,65 @@
#include "cobalt/speech/speech_recognizer.h"
#include "base/bind.h"
+#include "base/rand_util.h"
+#include "base/string_number_conversions.h"
+#include "base/string_util.h"
+#include "base/utf_string_conversions.h"
+#include "cobalt/loader/fetcher_factory.h"
+#include "cobalt/network/network_module.h"
+#include "net/base/escape.h"
+#include "net/url_request/url_fetcher.h"
namespace cobalt {
namespace speech {
-SpeechRecognizer::SpeechRecognizer(loader::FetcherFactory* fetcher_factory)
- : fetcher_factory_(fetcher_factory),
+namespace {
+const char kBaseStreamURL[] =
+ "https://www.google.com/speech-api/full-duplex/v1";
+// TODO: Move this key somewhere more secure.
+const char kSpeechAPIKey[] = "";
+const char kUp[] = "up";
+const char kDown[] = "down";
+const char kClient[] = "com.speech.tv";
+
+GURL AppendPath(const GURL& url, const std::string& value) {
+ std::string path(url.path());
+
+ if (!path.empty()) path += "/";
+
+ path += net::EscapePath(value);
+ GURL::Replacements replacements;
+ replacements.SetPathStr(path);
+ return url.ReplaceComponents(replacements);
+}
+
+GURL AppendQueryParameter(const GURL& url, const std::string& new_query,
+ const std::string& value) {
+ std::string query(url.query());
+
+ if (!query.empty()) query += "&";
+
+ query += net::EscapeQueryParamValue(new_query, true);
+
+ if (!value.empty()) {
+ query += "=" + net::EscapeQueryParamValue(value, true);
+ }
+
+ GURL::Replacements replacements;
+ replacements.SetQueryStr(query);
+ return url.ReplaceComponents(replacements);
+}
+
+} // namespace
+
+SpeechRecognizer::SpeechRecognizer(network::NetworkModule* network_module,
+ const ResultCallback& result_callback,
+ const ErrorCallback& error_callback)
+ : network_module_(network_module),
thread_("speech_recognizer"),
- started_(false) {
+ started_(false),
+ result_callback_(result_callback),
+ error_callback_(error_callback) {
thread_.StartWithOptions(base::Thread::Options(MessageLoop::TYPE_IO, 0));
}
@@ -32,11 +83,12 @@
Stop();
}
-void SpeechRecognizer::Start(const SpeechRecognitionConfig& config) {
+void SpeechRecognizer::Start(const SpeechRecognitionConfig& config,
+ int sample_rate) {
// Called by the speech recognition manager thread.
- thread_.message_loop()->PostTask(FROM_HERE,
- base::Bind(&SpeechRecognizer::StartInternal,
- base::Unretained(this), config));
+ thread_.message_loop()->PostTask(
+ FROM_HERE, base::Bind(&SpeechRecognizer::StartInternal,
+ base::Unretained(this), config, sample_rate));
}
void SpeechRecognizer::Stop() {
@@ -46,21 +98,19 @@
base::Bind(&SpeechRecognizer::StopInternal, base::Unretained(this)));
}
-void SpeechRecognizer::RecognizeAudio(scoped_array<uint8> encoded_audio_data,
- size_t size, bool is_last_chunk) {
+void SpeechRecognizer::RecognizeAudio(scoped_ptr<AudioBus> audio_bus,
+ bool is_last_chunk) {
// Called by the speech recognition manager thread.
thread_.message_loop()->PostTask(
- FROM_HERE,
- base::Bind(&SpeechRecognizer::UploadAudioDataInternal,
- base::Unretained(this), base::Passed(&encoded_audio_data),
- size, is_last_chunk));
+ FROM_HERE, base::Bind(&SpeechRecognizer::UploadAudioDataInternal,
+ base::Unretained(this), base::Passed(&audio_bus),
+ is_last_chunk));
}
void SpeechRecognizer::OnURLFetchDownloadData(
const net::URLFetcher* source, scoped_ptr<std::string> download_data) {
DCHECK_EQ(thread_.message_loop(), MessageLoop::current());
-
- // TODO: process the download data.
+ // TODO: Parse the serialized protocol buffers data.
NOTIMPLEMENTED();
UNREFERENCED_PARAMETER(source);
@@ -70,45 +120,100 @@
void SpeechRecognizer::OnURLFetchComplete(const net::URLFetcher* source) {
DCHECK_EQ(thread_.message_loop(), MessageLoop::current());
UNREFERENCED_PARAMETER(source);
+ started_ = false;
}
-void SpeechRecognizer::StartInternal(const SpeechRecognitionConfig& config) {
+void SpeechRecognizer::StartInternal(const SpeechRecognitionConfig& config,
+ int sample_rate) {
DCHECK_EQ(thread_.message_loop(), MessageLoop::current());
+
if (started_) {
// Recognizer is already started.
return;
}
started_ = true;
- // TODO: set up url fetchers with this URLFetcherDelegate.
- NOTIMPLEMENTED();
+ encoder_.reset(new AudioEncoderFlac(sample_rate));
- UNREFERENCED_PARAMETER(config);
- UNREFERENCED_PARAMETER(fetcher_factory_);
+ // Required for streaming on both up and down connections.
+ std::string pair = base::Uint64ToString(base::RandUint64());
+
+ // Set up down stream first.
+ GURL down_url(kBaseStreamURL);
+ down_url = AppendPath(down_url, kDown);
+ down_url = AppendQueryParameter(down_url, "pair", pair);
+ // Use protocol buffers as the output format.
+ down_url = AppendQueryParameter(down_url, "output", "pb");
+
+ downstream_fetcher_.reset(
+ net::URLFetcher::Create(down_url, net::URLFetcher::GET, this));
+ downstream_fetcher_->SetRequestContext(
+ network_module_->url_request_context_getter());
+ downstream_fetcher_->Start();
+
+ // Up stream.
+ GURL up_url(kBaseStreamURL);
+ up_url = AppendPath(up_url, kUp);
+ up_url = AppendQueryParameter(up_url, "client", kClient);
+ up_url = AppendQueryParameter(up_url, "pair", pair);
+ up_url = AppendQueryParameter(up_url, "output", "pb");
+ up_url = AppendQueryParameter(up_url, "key", kSpeechAPIKey);
+
+ if (!config.lang.empty()) {
+ up_url = AppendQueryParameter(up_url, "lang", config.lang);
+ }
+
+ if (config.max_alternatives) {
+ up_url = AppendQueryParameter(up_url, "maxAlternatives",
+ base::UintToString(config.max_alternatives));
+ }
+
+ if (config.continuous) {
+ up_url = AppendQueryParameter(up_url, "continuous", "");
+ }
+ if (config.interim_results) {
+ up_url = AppendQueryParameter(up_url, "interim", "");
+ }
+
+ upstream_fetcher_.reset(
+ net::URLFetcher::Create(up_url, net::URLFetcher::POST, this));
+ upstream_fetcher_->SetRequestContext(
+ network_module_->url_request_context_getter());
+ upstream_fetcher_->SetChunkedUpload(encoder_->GetMimeType());
+ upstream_fetcher_->Start();
}
void SpeechRecognizer::StopInternal() {
DCHECK_EQ(thread_.message_loop(), MessageLoop::current());
+
if (!started_) {
// Recognizer is not started.
return;
}
started_ = false;
- // TODO: terminate url fetchers.
- NOTIMPLEMENTED();
+ upstream_fetcher_.reset();
+ downstream_fetcher_.reset();
+ encoder_.reset();
}
-void SpeechRecognizer::UploadAudioDataInternal(
- scoped_array<uint8> encoded_audio_data, size_t size, bool is_last_chunk) {
+void SpeechRecognizer::UploadAudioDataInternal(scoped_ptr<AudioBus> audio_bus,
+ bool is_last_chunk) {
DCHECK_EQ(thread_.message_loop(), MessageLoop::current());
+ DCHECK(audio_bus);
- // TODO: upload encoded audio data chunk.
- NOTIMPLEMENTED();
+ std::string encoded_audio_data;
+ if (encoder_) {
+ encoder_->Encode(audio_bus.get());
+ if (is_last_chunk) {
+ encoder_->Finish();
+ }
+ encoded_audio_data = encoder_->GetAndClearAvailableEncodedData();
+ }
- UNREFERENCED_PARAMETER(encoded_audio_data);
- UNREFERENCED_PARAMETER(size);
- UNREFERENCED_PARAMETER(is_last_chunk);
+ if (upstream_fetcher_ && !encoded_audio_data.empty()) {
+ upstream_fetcher_->AppendChunkToUpload(encoded_audio_data, is_last_chunk);
+ }
}
} // namespace speech
diff --git a/src/cobalt/speech/speech_recognizer.h b/src/cobalt/speech/speech_recognizer.h
index c1900c7..7b54584 100644
--- a/src/cobalt/speech/speech_recognizer.h
+++ b/src/cobalt/speech/speech_recognizer.h
@@ -18,17 +18,22 @@
#define COBALT_SPEECH_SPEECH_RECOGNIZER_H_
#include <string>
+#include <vector>
#include "base/threading/thread.h"
-#include "cobalt/loader/fetcher_factory.h"
+#include "cobalt/network/network_module.h"
+#include "cobalt/speech/audio_encoder_flac.h"
#include "cobalt/speech/speech_recognition_config.h"
+#include "cobalt/speech/speech_recognition_event.h"
+#include "media/base/audio_bus.h"
+#include "net/url_request/url_fetcher.h"
#include "net/url_request/url_fetcher_delegate.h"
namespace cobalt {
namespace speech {
-// Interacts with Google speech recogniton service, and then parses recognition
-// results and forms speech recogniton event.
+// Interacts with Google speech recognition service, and then parses recognition
+// results and forms speech recognition event.
// It creates an upstream fetcher to upload the encoded audio and a downstream
// fetcher to fetch the speech recognition result. The fetched speech
// recognition result is parsed by JSON parser and a SpeechRecognitionEvent,
@@ -36,18 +41,24 @@
// manager.
class SpeechRecognizer : public net::URLFetcherDelegate {
public:
- explicit SpeechRecognizer(loader::FetcherFactory* fetcher_factory);
+ typedef ::media::AudioBus AudioBus;
+ typedef base::Callback<void(const scoped_refptr<SpeechRecognitionEvent>&)>
+ ResultCallback;
+ typedef base::Callback<void(void)> ErrorCallback;
+
+ SpeechRecognizer(network::NetworkModule* network_module,
+ const ResultCallback& result_callback,
+ const ErrorCallback& error_callback);
~SpeechRecognizer() OVERRIDE;
// Multiple calls to Start/Stop are allowed, the implementation should take
// care of multiple calls.
// Start speech recognizer.
- void Start(const SpeechRecognitionConfig& config);
+ void Start(const SpeechRecognitionConfig& config, int sample_rate);
// Stop speech recognizer.
void Stop();
// An encoded audio data is available and ready to be recognized.
- void RecognizeAudio(scoped_array<uint8> encoded_audio_data, size_t size,
- bool is_last_chunk);
+ void RecognizeAudio(scoped_ptr<AudioBus> audio_bus, bool is_last_chunk);
// net::URLFetcherDelegate interface
void OnURLFetchDownloadData(const net::URLFetcher* source,
@@ -58,18 +69,26 @@
int64 /*current*/, int64 /*total*/) OVERRIDE {}
private:
- void StartInternal(const SpeechRecognitionConfig& config);
+ void StartInternal(const SpeechRecognitionConfig& config, int sample_rate);
void StopInternal();
-
- void UploadAudioDataInternal(scoped_array<uint8> encoded_audio_data,
- size_t size, bool is_last_chunk);
+ void UploadAudioDataInternal(scoped_ptr<AudioBus> audio_bus,
+ bool is_last_chunk);
// This is used for creating fetchers.
- loader::FetcherFactory* fetcher_factory_;
+ network::NetworkModule* network_module_;
// Speech recognizer is operating in its own thread.
base::Thread thread_;
// Track the start/stop state of speech recognizer.
bool started_;
+
+ // Encoder for encoding raw audio data to flac codec.
+ scoped_ptr<AudioEncoderFlac> encoder_;
+ // Fetcher for posting the audio data.
+ scoped_ptr<net::URLFetcher> upstream_fetcher_;
+ // Fetcher for receiving the streaming results.
+ scoped_ptr<net::URLFetcher> downstream_fetcher_;
+ ResultCallback result_callback_;
+ ErrorCallback error_callback_;
};
} // namespace speech
diff --git a/src/cobalt/storage/savegame.h b/src/cobalt/storage/savegame.h
index efb4adc..35005e4 100644
--- a/src/cobalt/storage/savegame.h
+++ b/src/cobalt/storage/savegame.h
@@ -52,6 +52,8 @@
// Delete the savegame file when the Savegame object goes out of scope.
// This should only be used by tests.
bool delete_on_destruction;
+ // Initial data to return from Read. Only for tests.
+ ByteVector test_initial_data;
};
static scoped_ptr<Savegame> Create(const Options& options);
@@ -83,7 +85,6 @@
DISALLOW_COPY_AND_ASSIGN(Savegame);
};
-
} // namespace storage
} // namespace cobalt
diff --git a/src/cobalt/storage/savegame_fake.cc b/src/cobalt/storage/savegame_fake.cc
index 482e200..9866e6d 100644
--- a/src/cobalt/storage/savegame_fake.cc
+++ b/src/cobalt/storage/savegame_fake.cc
@@ -23,6 +23,13 @@
Savegame::ByteVector* SavegameFake::s_persistent_data_;
+SavegameFake::SavegameFake(const Options& options) : Savegame(options) {
+ if (options.test_initial_data.size() > 0 && !s_persistent_data_) {
+ s_persistent_data_ = new ByteVector();
+ *s_persistent_data_ = options.test_initial_data;
+ }
+}
+
bool SavegameFake::PlatformRead(ByteVector* bytes_ptr) {
if (s_persistent_data_) {
*bytes_ptr = *s_persistent_data_;
diff --git a/src/cobalt/storage/savegame_fake.h b/src/cobalt/storage/savegame_fake.h
index 7350c5f..a7e0a4a 100644
--- a/src/cobalt/storage/savegame_fake.h
+++ b/src/cobalt/storage/savegame_fake.h
@@ -30,7 +30,7 @@
// We only support a single "file".
class SavegameFake : public Savegame {
public:
- explicit SavegameFake(const Options& options) : Savegame(options) {}
+ explicit SavegameFake(const Options& options);
~SavegameFake() OVERRIDE {
if (options_.delete_on_destruction) {
diff --git a/src/cobalt/storage/storage_manager.cc b/src/cobalt/storage/storage_manager.cc
index d7d2e9c..decba88 100644
--- a/src/cobalt/storage/storage_manager.cc
+++ b/src/cobalt/storage/storage_manager.cc
@@ -23,6 +23,7 @@
#include "base/debug/trace_event.h"
#include "base/stringprintf.h"
#include "cobalt/storage/savegame_thread.h"
+#include "cobalt/storage/upgrade/upgrade_reader.h"
#include "cobalt/storage/virtual_file.h"
#include "cobalt/storage/virtual_file_system.h"
#include "sql/statement.h"
@@ -134,8 +135,10 @@
} // namespace
-StorageManager::StorageManager(const Options& options)
- : options_(options),
+StorageManager::StorageManager(scoped_ptr<UpgradeHandler> upgrade_handler,
+ const Options& options)
+ : upgrade_handler_(upgrade_handler.Pass()),
+ options_(options),
sql_thread_(new base::Thread("StorageManager SQL")),
ALLOW_THIS_IN_INITIALIZER_LIST(sql_context_(new SqlContext(this))),
connection_(new sql::Connection()),
@@ -145,6 +148,7 @@
flush_requested_(false),
no_flushes_pending_(true /* manual reset */,
true /* initially signalled */) {
+ DCHECK(upgrade_handler_);
TRACE_EVENT0("cobalt::storage", __FUNCTION__);
savegame_thread_.reset(new SavegameThread(options_.savegame_options));
// Start the savegame load immediately.
@@ -249,6 +253,7 @@
sql_vfs_.reset(new SqlVfs("cobalt_vfs", vfs_.get()));
flush_timer_.reset(new base::OneShotTimer<StorageManager>());
// Savegame has finished loading. Now initialize the database connection.
+ // Check if this is upgrade data, if so, handle it, otherwise:
// Check if the savegame data contains a VFS header.
// If so, proceed to deserialize it.
// If not, load the file into the VFS directly.
@@ -257,18 +262,25 @@
DCHECK(loaded_raw_bytes);
Savegame::ByteVector& raw_bytes = *loaded_raw_bytes;
VirtualFileSystem::SerializedHeader header = {};
- if (raw_bytes.size() > 0) {
- if (raw_bytes.size() >= sizeof(VirtualFileSystem::SerializedHeader)) {
- memcpy(&header, &raw_bytes[0],
- sizeof(VirtualFileSystem::SerializedHeader));
- }
- if (VirtualFileSystem::GetHeaderVersion(header) == -1) {
- VirtualFile* vf = vfs_->Open(kDefaultSaveFile);
- vf->Write(&raw_bytes[0], static_cast<int>(raw_bytes.size()),
- 0 /* offset */);
+ if (raw_bytes.size() > 0) {
+ const char* buffer = reinterpret_cast<char*>(&raw_bytes[0]);
+ int buffer_size = static_cast<int>(raw_bytes.size());
+ // Is this upgrade data?
+ if (upgrade::UpgradeReader::IsUpgradeData(buffer, buffer_size)) {
+ upgrade_handler_->OnUpgrade(this, buffer, buffer_size);
} else {
- vfs_->Deserialize(&raw_bytes[0], static_cast<int>(raw_bytes.size()));
+ if (raw_bytes.size() >= sizeof(VirtualFileSystem::SerializedHeader)) {
+ memcpy(&header, &raw_bytes[0],
+ sizeof(VirtualFileSystem::SerializedHeader));
+ }
+
+ if (VirtualFileSystem::GetHeaderVersion(header) == -1) {
+ VirtualFile* vf = vfs_->Open(kDefaultSaveFile);
+ vf->Write(&raw_bytes[0], buffer_size, 0 /* offset */);
+ } else {
+ vfs_->Deserialize(&raw_bytes[0], buffer_size);
+ }
}
}
diff --git a/src/cobalt/storage/storage_manager.h b/src/cobalt/storage/storage_manager.h
index f2a2ad7..021e0f8 100644
--- a/src/cobalt/storage/storage_manager.h
+++ b/src/cobalt/storage/storage_manager.h
@@ -28,6 +28,7 @@
#include "base/timer.h"
#include "cobalt/storage/savegame_thread.h"
#include "cobalt/storage/sql_vfs.h"
+#include "cobalt/storage/upgrade/upgrade_reader.h"
#include "cobalt/storage/virtual_file_system.h"
#include "sql/connection.h"
@@ -46,6 +47,16 @@
// is loaded.
class StorageManager {
public:
+ // Support for "upgrade" of legacy save data that may have been generated by
+ // a platform other than Steel/Cobalt. If save data in the upgrade format is
+ // detected, the |OnUpgrade| method will be called on |upgrade_handler_|.
+ class UpgradeHandler {
+ public:
+ virtual ~UpgradeHandler() {}
+ virtual void OnUpgrade(StorageManager* storage, const char* data,
+ int size) = 0;
+ };
+
struct Options {
Savegame::Options savegame_options;
};
@@ -66,7 +77,8 @@
kSchemaVersionLost = -2,
};
- explicit StorageManager(const Options& options);
+ StorageManager(scoped_ptr<UpgradeHandler> upgrade_handler,
+ const Options& options);
virtual ~StorageManager();
// Obtain the SqlContext for our database.
@@ -89,6 +101,8 @@
const Options& options() const { return options_; }
+ UpgradeHandler* upgrade_handler() const { return upgrade_handler_.get(); }
+
protected:
// Queues a flush to be executed as soon as possible. As soon as possible
// will be as soon as any existing flush completes, or right away if no
@@ -131,6 +145,9 @@
bool GetSchemaVersion(const char* table_name, int* schema_version);
void UpdateSchemaVersion(const char* table_name, int version);
+ // Upgrade handler used if upgrade save data is detected.
+ scoped_ptr<UpgradeHandler> upgrade_handler_;
+
// Configuration options for the Storage Manager.
Options options_;
@@ -233,7 +250,8 @@
explicit SqlContext(StorageManager* storage_manager)
: storage_manager_(storage_manager) {}
- friend StorageManager::StorageManager(const Options& options);
+ friend StorageManager::StorageManager(
+ scoped_ptr<UpgradeHandler> upgrade_handler, const Options& options);
DISALLOW_COPY_AND_ASSIGN(SqlContext);
};
diff --git a/src/cobalt/storage/storage_manager_test.cc b/src/cobalt/storage/storage_manager_test.cc
index fbd0c9c..7160854 100644
--- a/src/cobalt/storage/storage_manager_test.cc
+++ b/src/cobalt/storage/storage_manager_test.cc
@@ -31,6 +31,8 @@
using ::testing::_;
using ::testing::InvokeWithoutArgs;
+using ::testing::NotNull;
+using ::testing::Eq;
namespace cobalt {
namespace storage {
@@ -40,11 +42,17 @@
// Used to be able to intercept QueueFlush().
class MockStorageManager : public StorageManager {
public:
- explicit MockStorageManager(const Options& options)
- : StorageManager(options) {}
+ MockStorageManager(scoped_ptr<StorageManager::UpgradeHandler> upgrade_handler,
+ const Options& options)
+ : StorageManager(upgrade_handler.Pass(), options) {}
MOCK_METHOD1(QueueFlush, void(const base::Closure& callback));
};
+class MockUpgradeHandler : public StorageManager::UpgradeHandler {
+ public:
+ MOCK_METHOD3(OnUpgrade, void(StorageManager*, const char*, int));
+};
+
class CallbackWaiter {
public:
CallbackWaiter() : was_called_event_(true, false) {}
@@ -120,15 +128,22 @@
~StorageManagerTest() { storage_manager_.reset(NULL); }
template <typename StorageManagerType>
- void Init(bool delete_savegame = true) {
+ void Init(bool delete_savegame = true,
+ const Savegame::ByteVector* initial_data = NULL) {
// Destroy the current one first. We can't have two VFSs with the same name
// concurrently.
storage_manager_.reset(NULL);
- StorageManager::Options options;
+ scoped_ptr<StorageManager::UpgradeHandler> upgrade_handler(
+ new MockUpgradeHandler());
+ StorageManager::Options options;
options.savegame_options.delete_on_destruction = delete_savegame;
options.savegame_options.factory = &SavegameFake::Create;
- storage_manager_.reset(new StorageManagerType(options));
+ if (initial_data) {
+ options.savegame_options.test_initial_data = *initial_data;
+ }
+ storage_manager_.reset(
+ new StorageManagerType(upgrade_handler.Pass(), options));
}
MessageLoop message_loop_;
@@ -196,5 +211,27 @@
EXPECT_EQ(true, waiter.TimedWait());
}
+TEST_F(StorageManagerTest, Upgrade) {
+ Savegame::ByteVector initial_data;
+ initial_data.push_back('U');
+ initial_data.push_back('P');
+ initial_data.push_back('G');
+ initial_data.push_back('0');
+ Init<StorageManager>(true, &initial_data);
+
+ // We expect a call to the upgrade handler when it reads this data.
+ MockUpgradeHandler& upgrade_handler =
+ *dynamic_cast<MockUpgradeHandler*>(storage_manager_->upgrade_handler());
+ EXPECT_CALL(upgrade_handler,
+ OnUpgrade(Eq(storage_manager_.get()), NotNull(), Eq(4)))
+ .Times(1);
+
+ FlushWaiter waiter;
+ storage_manager_->FlushNow(
+ base::Bind(&FlushWaiter::OnFlushDone, base::Unretained(&waiter)));
+ EXPECT_EQ(true, waiter.TimedWait());
+ message_loop_.RunUntilIdle();
+}
+
} // namespace storage
} // namespace cobalt
diff --git a/src/cobalt/storage/upgrade/schema_v1.proto b/src/cobalt/storage/upgrade/schema_v1.proto
index 6907806..c2516a5 100644
--- a/src/cobalt/storage/upgrade/schema_v1.proto
+++ b/src/cobalt/storage/upgrade/schema_v1.proto
@@ -37,7 +37,7 @@
// A single cookie.
mesage Cookie {
- // URL in canonical form, e.g. "https://www.youtube.com/". Must be provided
+ // URL in canonical form, e.g. "https://www.example.com/". Must be provided
// or the cookie will be ignored.
string url = 1;
@@ -48,7 +48,7 @@
// or the cookie will be ignored.
string value = 3;
- // Domain, e.g. ".youtube.com". Defaults to the domain of the "url" field.
+ // Domain, e.g. ".example.com". Defaults to the domain of the "url" field.
string domain = 4;
// Optional virtual path, defaults to "/".
diff --git a/src/cobalt/storage/upgrade/storage_upgrade_test.cc b/src/cobalt/storage/upgrade/storage_upgrade_test.cc
index e7b64f0..8728b7e 100644
--- a/src/cobalt/storage/upgrade/storage_upgrade_test.cc
+++ b/src/cobalt/storage/upgrade/storage_upgrade_test.cc
@@ -34,9 +34,6 @@
namespace {
-const int kHeaderSize = 4;
-const char kHeader[] = "UPG0";
-
void ReadFileToString(const char* pathname, std::string* string_out) {
EXPECT_TRUE(pathname);
EXPECT_TRUE(string_out);
@@ -44,9 +41,11 @@
EXPECT_TRUE(PathService::Get(base::DIR_SOURCE_ROOT, &file_path));
file_path = file_path.Append(pathname);
EXPECT_TRUE(file_util::ReadFileToString(file_path, string_out));
- EXPECT_GT(string_out->length(), static_cast<size_t>(kHeaderSize));
- EXPECT_LE(string_out->length(), static_cast<size_t>(10 * 1024 * 1024));
- EXPECT_EQ(string_out->find(kHeader), 0);
+ const char* data = string_out->c_str();
+ const int size = static_cast<int>(string_out->length());
+ EXPECT_GT(size, 0);
+ EXPECT_LE(size, 10 * 1024 * 1024);
+ EXPECT_TRUE(UpgradeReader::IsUpgradeData(data, size));
}
void ValidateCookie(const net::CanonicalCookie* cookie, const std::string& url,
@@ -94,14 +93,13 @@
std::string file_contents;
ReadFileToString("cobalt/storage/upgrade/testdata/minimal_cookie_v1.json",
&file_contents);
- UpgradeReader upgrade_reader(
- file_contents.c_str() + kHeaderSize,
- static_cast<int>(file_contents.length()) - kHeaderSize);
+ UpgradeReader upgrade_reader(file_contents.c_str(),
+ static_cast<int>(file_contents.length()));
// 1 cookie.
EXPECT_EQ(upgrade_reader.GetNumCookies(), 1);
const net::CanonicalCookie* cookie = upgrade_reader.GetCookie(0);
- ValidateCookie(cookie, "https://www.youtube.com/", "cookie_name",
+ ValidateCookie(cookie, "https://www.example.com/", "cookie_name",
"cookie_value");
EXPECT_FALSE(upgrade_reader.GetCookie(1));
@@ -115,9 +113,8 @@
ReadFileToString(
"cobalt/storage/upgrade/testdata/minimal_local_storage_entry_v1.json",
&file_contents);
- UpgradeReader upgrade_reader(
- file_contents.c_str() + kHeaderSize,
- static_cast<int>(file_contents.length()) - kHeaderSize);
+ UpgradeReader upgrade_reader(file_contents.c_str(),
+ static_cast<int>(file_contents.length()));
// 0 cookies.
EXPECT_EQ(upgrade_reader.GetNumCookies(), 0);
@@ -135,24 +132,23 @@
std::string file_contents;
ReadFileToString("cobalt/storage/upgrade/testdata/full_data_v1.json",
&file_contents);
- UpgradeReader upgrade_reader(
- file_contents.c_str() + kHeaderSize,
- static_cast<int>(file_contents.length()) - kHeaderSize);
+ UpgradeReader upgrade_reader(file_contents.c_str(),
+ static_cast<int>(file_contents.length()));
// 2 cookies.
EXPECT_EQ(upgrade_reader.GetNumCookies(), 2);
const net::CanonicalCookie* cookie = upgrade_reader.GetCookie(0);
base::Time creation = base::Time::FromInternalValue(13119668760000000L);
base::Time expiration = base::Time::FromInternalValue(13120000000000000L);
- ValidateCookie(cookie, "https://www.youtube.com/", "cookie_name",
- "cookie_value", "cookie.domain", "cookie/path", creation,
- expiration, true);
+ ValidateCookie(cookie, "https://www.example.com/1", "cookie_name",
+ "cookie_value", ".example.com", "/1", creation, expiration,
+ true);
cookie = upgrade_reader.GetCookie(1);
creation = base::Time::FromInternalValue(13109668760000000L);
expiration = base::Time::FromInternalValue(13110000000000000L);
- ValidateCookie(cookie, "https://www.somewhere.com/", "cookie_name_2",
- "cookie_value_2", "cookie.domain2", "cookie/path/2", creation,
- expiration, true);
+ ValidateCookie(cookie, "https://www.somewhere.com/2", "cookie_name_2",
+ "cookie_value_2", ".somewhere.com", "/2", creation, expiration,
+ true);
EXPECT_FALSE(upgrade_reader.GetCookie(2));
// 2 local storage entries.
@@ -169,9 +165,8 @@
std::string file_contents;
ReadFileToString("cobalt/storage/upgrade/testdata/missing_fields_v1.json",
&file_contents);
- UpgradeReader upgrade_reader(
- file_contents.c_str() + kHeaderSize,
- static_cast<int>(file_contents.length()) - kHeaderSize);
+ UpgradeReader upgrade_reader(file_contents.c_str(),
+ static_cast<int>(file_contents.length()));
// 1 cookie with missing fields, 2 local storage entries with missing fields,
// 1 valid local storage entry.
@@ -188,9 +183,8 @@
std::string file_contents;
ReadFileToString("cobalt/storage/upgrade/testdata/malformed_v1.json",
&file_contents);
- UpgradeReader upgrade_reader(
- file_contents.c_str() + kHeaderSize,
- static_cast<int>(file_contents.length()) - kHeaderSize);
+ UpgradeReader upgrade_reader(file_contents.c_str(),
+ static_cast<int>(file_contents.length()));
// No cookies or local storage entries available in malformed data.
EXPECT_EQ(upgrade_reader.GetNumCookies(), 0);
@@ -203,14 +197,13 @@
std::string file_contents;
ReadFileToString("cobalt/storage/upgrade/testdata/extra_fields_v1.json",
&file_contents);
- UpgradeReader upgrade_reader(
- file_contents.c_str() + kHeaderSize,
- static_cast<int>(file_contents.length()) - kHeaderSize);
+ UpgradeReader upgrade_reader(file_contents.c_str(),
+ static_cast<int>(file_contents.length()));
// 1 cookie, extra fields should be ignored.
EXPECT_EQ(upgrade_reader.GetNumCookies(), 1);
const net::CanonicalCookie* cookie = upgrade_reader.GetCookie(0);
- ValidateCookie(cookie, "https://www.youtube.com/", "cookie_name",
+ ValidateCookie(cookie, "https://www.example.com/", "cookie_name",
"cookie_value");
EXPECT_FALSE(upgrade_reader.GetCookie(1));
diff --git a/src/cobalt/storage/upgrade/testdata/extra_fields_v1.json b/src/cobalt/storage/upgrade/testdata/extra_fields_v1.json
index 7709e50..ba7211e 100644
--- a/src/cobalt/storage/upgrade/testdata/extra_fields_v1.json
+++ b/src/cobalt/storage/upgrade/testdata/extra_fields_v1.json
@@ -2,7 +2,7 @@
{
"cookies": [
{
- "url": "https://www.youtube.com",
+ "url": "https://www.example.com",
"name": "cookie_name",
"value": "cookie_value",
"unexpected": "dummy_value"
diff --git a/src/cobalt/storage/upgrade/testdata/full_data_v1.json b/src/cobalt/storage/upgrade/testdata/full_data_v1.json
index c5a039f..07b0296 100644
--- a/src/cobalt/storage/upgrade/testdata/full_data_v1.json
+++ b/src/cobalt/storage/upgrade/testdata/full_data_v1.json
@@ -2,22 +2,22 @@
{
"cookies": [
{
- "url": "https://www.youtube.com",
+ "url": "https://www.example.com/1",
"name": "cookie_name",
"value": "cookie_value",
- "domain": "cookie.domain",
- "path": "cookie/path",
+ "domain": ".example.com",
+ "path": "/1",
"creation": "13119668760000000",
"expiration": "13120000000000000",
"last_acess": "13119668770000000",
"http_only": true
},
{
- "url": "http://www.somewhere.com",
+ "url": "http://www.somewhere.com/2",
"name": "cookie_name_2",
"value": "cookie_value_2",
- "domain": "cookie.domain2",
- "path": "cookie/path/2",
+ "domain": ".somewhere.com",
+ "path": "/2",
"creation": "13109668760000000",
"expiration": "13110000000000000",
"last_acess": "13109668770000000",
diff --git a/src/cobalt/storage/upgrade/testdata/malformed_v1.json b/src/cobalt/storage/upgrade/testdata/malformed_v1.json
index 6d87bf0..4b26940 100644
--- a/src/cobalt/storage/upgrade/testdata/malformed_v1.json
+++ b/src/cobalt/storage/upgrade/testdata/malformed_v1.json
@@ -2,22 +2,22 @@
{
"cookies": [
{
- "url": "https://www.youtube.com",
+ "url": "https://www.example.com/1",
"name": "cookie_name",
"value": "cookie_value"
- "domain": "cookie.domain",
- "path": "cookie/path",
+ "domain": ".example.com",
+ "path": "/1",
"creation": "13119668760000000",
"expiration": "13120000000000000",
"last_acess": "13119668770000000",
"http_only": true
},
{
- "url": "http://www.somewhere.com",
+ "url": "http://www.somewhere.com/2",
"name": "cookie_name_2",
"value": "cookie_value_2",
- "domain": "cookie.domain2",
- "path": "cookie/path/2",
+ "domain": ".somewhere.com",
+ "path": "/2",
"creation": "13109668760000000",
"expiration": "13110000000000000",
"last_acess": "13109668770000000",
diff --git a/src/cobalt/storage/upgrade/testdata/minimal_cookie_v1.json b/src/cobalt/storage/upgrade/testdata/minimal_cookie_v1.json
index 7dbd209..a7f1983 100644
--- a/src/cobalt/storage/upgrade/testdata/minimal_cookie_v1.json
+++ b/src/cobalt/storage/upgrade/testdata/minimal_cookie_v1.json
@@ -2,7 +2,7 @@
{
"cookies": [
{
- "url": "https://www.youtube.com",
+ "url": "https://www.example.com",
"name": "cookie_name",
"value": "cookie_value"
}
diff --git a/src/cobalt/storage/upgrade/upgrade_reader.cc b/src/cobalt/storage/upgrade/upgrade_reader.cc
index 35fdb53..64ef1aa 100644
--- a/src/cobalt/storage/upgrade/upgrade_reader.cc
+++ b/src/cobalt/storage/upgrade/upgrade_reader.cc
@@ -30,6 +30,10 @@
namespace {
+// The header at the start of upgrade data.
+const int kHeaderSize = 4;
+const char kHeader[] = "UPG0";
+
// Used as a sanity check.
const int kMaxUpgradeDataSize = 10 * 1024 * 1024;
@@ -104,9 +108,14 @@
UpgradeReader::UpgradeReader(const char* data, int size) {
DCHECK(data);
- DCHECK_GT(size, 0);
+ DCHECK_GE(size, kHeaderSize);
DCHECK_LE(size, kMaxUpgradeDataSize);
+ // Check the header and offset the data.
+ DCHECK(IsUpgradeData(data, size));
+ data += kHeaderSize;
+ size -= kHeaderSize;
+
base::JSONReader json_reader;
scoped_ptr<base::Value> parsed(
json_reader.ReadToValue(std::string(data, static_cast<size_t>(size))));
@@ -200,6 +209,11 @@
local_storage_entries_.push_back(LocalStorageEntry(key, value));
}
+// static
+bool UpgradeReader::IsUpgradeData(const char* data, int size) {
+ return size >= kHeaderSize && memcmp(data, kHeader, kHeaderSize) == 0;
+}
+
void UpgradeReader::ProcessValues(const base::DictionaryValue* dictionary) {
DCHECK(dictionary);
diff --git a/src/cobalt/storage/upgrade/upgrade_reader.h b/src/cobalt/storage/upgrade/upgrade_reader.h
index 00d1462..83f0953 100644
--- a/src/cobalt/storage/upgrade/upgrade_reader.h
+++ b/src/cobalt/storage/upgrade/upgrade_reader.h
@@ -57,6 +57,8 @@
// storage entry doesn't exist, return NULL.
const LocalStorageEntry* GetLocalStorageEntry(int index) const;
+ static bool IsUpgradeData(const char* data, int size);
+
private:
// Process the parsed values and populate |cookies_| and
// |local_storage_entries_|.
diff --git a/src/cobalt/system_window/application_event.h b/src/cobalt/system_window/application_event.h
index 1387528..248cd9b 100644
--- a/src/cobalt/system_window/application_event.h
+++ b/src/cobalt/system_window/application_event.h
@@ -26,6 +26,8 @@
public:
enum Type {
kQuit,
+ kPause,
+ kUnpause,
kSuspend,
kResume,
};
diff --git a/src/glimp/egl/surface_impl.h b/src/glimp/egl/surface_impl.h
index ead72c4..fc026c3 100644
--- a/src/glimp/egl/surface_impl.h
+++ b/src/glimp/egl/surface_impl.h
@@ -32,6 +32,10 @@
virtual int GetWidth() const = 0;
virtual int GetHeight() const = 0;
+ // Returns true if the surface is a window surface, false if the surface is a
+ // pixel buffer or a pixmap.
+ virtual bool IsWindowSurface() const = 0;
+
private:
};
diff --git a/src/glimp/gles/context.cc b/src/glimp/gles/context.cc
index 2057fcb..608344f 100644
--- a/src/glimp/gles/context.cc
+++ b/src/glimp/gles/context.cc
@@ -2116,8 +2116,14 @@
void Context::SwapBuffers() {
GLIMP_TRACE_EVENT0(__FUNCTION__);
- Flush();
- impl_->SwapBuffers(default_draw_framebuffer_->color_attachment_surface());
+ egl::Surface* surface = default_draw_framebuffer_->color_attachment_surface();
+ // If surface is a pixel buffer or a pixmap, eglSwapBuffers has no effect, and
+ // no error is generated.
+ // https://www.khronos.org/registry/egl/sdk/docs/man/html/eglSwapBuffers.xhtml
+ if (surface->impl()->IsWindowSurface()) {
+ Flush();
+ impl_->SwapBuffers(surface);
+ }
}
bool Context::BindTextureToEGLSurface(egl::Surface* surface) {
diff --git a/src/starboard/client_porting/poem/assert_poem.h b/src/starboard/client_porting/poem/assert_poem.h
new file mode 100644
index 0000000..9d01a43
--- /dev/null
+++ b/src/starboard/client_porting/poem/assert_poem.h
@@ -0,0 +1,40 @@
+// Copyright 2016 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// A poem (POsix EMulation) for the assert macro in assert.h
+
+#ifndef STARBOARD_CLIENT_PORTING_POEM_ASSERT_POEM_H_
+#define STARBOARD_CLIENT_PORTING_POEM_ASSERT_POEM_H_
+
+#if defined(STARBOARD)
+
+#include "starboard/log.h"
+
+#if !defined(POEM_NO_EMULATION)
+
+// On one line so that the assert macros do not interfere with reporting of line
+// numbers in compiler error messages.
+#define assert(x) \
+ do { \
+ if (!(x)) { \
+ SbLogFormatF("expression %s failed at %s:%d", #x, __FILE__, __LINE__); \
+ SbSystemBreakIntoDebugger(); \
+ } \
+ } while (false)
+
+#endif // POEM_NO_EMULATION
+
+#endif // STARBOARD
+
+#endif // STARBOARD_CLIENT_PORTING_POEM_ASSERT_POEM_H_
diff --git a/src/starboard/client_porting/poem/math_poem.h b/src/starboard/client_porting/poem/math_poem.h
new file mode 100644
index 0000000..7f406d2
--- /dev/null
+++ b/src/starboard/client_porting/poem/math_poem.h
@@ -0,0 +1,62 @@
+// Copyright 2016 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// A poem (POsix EMulation) for functions in math.h
+
+#ifndef STARBOARD_CLIENT_PORTING_POEM_MATH_POEM_H_
+#define STARBOARD_CLIENT_PORTING_POEM_MATH_POEM_H_
+
+#if defined(STARBOARD)
+
+#include "starboard/double.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+// Takes floor of a float |f|. Meant to be a drop-in replacement for |floorf|
+static SB_C_INLINE float PoemSingleFloor(const float f) {
+ double d(f);
+ return SbDoubleFloor(d);
+}
+
+#ifdef __cplusplus
+} // extern "C"
+#endif
+
+#if !defined(POEM_NO_EMULATION)
+
+#define fabs(x) SbDoubleAbsolute(x)
+#define floor(x) SbDoubleFloor(x)
+#define floorf(x) PoemSingleFloor(x)
+#define pow(x, y) SbDoubleExponent(x, y)
+
+#include <math.h>
+#define ceil(x) ceil(x)
+#define fmod(x, y) fmod(x, y)
+#define modf(x, y) modf(x, y)
+#define log(x) log(x)
+#define sqrt(x) sqrt(x)
+#define sin(x) sin(x)
+#define cos(x) cos(x)
+#define tan(x) tan(x)
+#define atan(x) atan(x)
+#define atan2(x, y) atan2(x, y)
+#define asin(x) asin(x)
+#define acos(x) acos(x)
+#endif // POEM_NO_EMULATION
+
+#endif // STARBOARD
+
+#endif // STARBOARD_CLIENT_PORTING_POEM_MATH_POEM_H_
diff --git a/src/starboard/client_porting/poem/stdio_poem.h b/src/starboard/client_porting/poem/stdio_poem.h
index 2500568..d68c688 100644
--- a/src/starboard/client_porting/poem/stdio_poem.h
+++ b/src/starboard/client_porting/poem/stdio_poem.h
@@ -21,8 +21,8 @@
#if !defined(POEM_NO_EMULATION)
-#include "starboard/string.h"
#include "starboard/memory.h"
+#include "starboard/string.h"
#define wcsncmp(s1, s2, c) SbStringCompareWide(s1, s2, c)
@@ -35,6 +35,7 @@
#define vsscanf SbStringScan
#define sscanf SbStringScanF
#define malloc(sz) SbMemoryAllocateUnchecked(sz)
+#define calloc(c, s) SbMemoryCalloc(c, s)
#define free(a) SbMemoryFree(a)
#define realloc(m, sz) SbMemoryReallocateUnchecked(m, sz)
diff --git a/src/starboard/linux/shared/configuration_public.h b/src/starboard/linux/shared/configuration_public.h
index 9859ced..7d84890 100644
--- a/src/starboard/linux/shared/configuration_public.h
+++ b/src/starboard/linux/shared/configuration_public.h
@@ -52,6 +52,9 @@
// Whether the current platform provides the standard header limits.h.
#define SB_HAS_LIMITS_H 1
+// Whether the current platform provides the standard header float.h.
+#define SB_HAS_FLOAT_H 1
+
// Type detection for wchar_t.
#if defined(__WCHAR_MAX__) && \
(__WCHAR_MAX__ == 0x7fffffff || __WCHAR_MAX__ == 0xffffffff)
diff --git a/src/starboard/linux/x64directfb/starboard_platform.gyp b/src/starboard/linux/x64directfb/starboard_platform.gyp
index cca05bb..331fd6c 100644
--- a/src/starboard/linux/x64directfb/starboard_platform.gyp
+++ b/src/starboard/linux/x64directfb/starboard_platform.gyp
@@ -170,6 +170,7 @@
'<(DEPTH)/starboard/shared/posix/log_format.cc',
'<(DEPTH)/starboard/shared/posix/log_is_tty.cc',
'<(DEPTH)/starboard/shared/posix/log_raw.cc',
+ '<(DEPTH)/starboard/shared/posix/memory_flush.cc',
'<(DEPTH)/starboard/shared/posix/set_non_blocking_internal.cc',
'<(DEPTH)/starboard/shared/posix/socket_accept.cc',
'<(DEPTH)/starboard/shared/posix/socket_bind.cc',
diff --git a/src/starboard/linux/x64x11/starboard_platform.gyp b/src/starboard/linux/x64x11/starboard_platform.gyp
index 6276a96..761a55d 100644
--- a/src/starboard/linux/x64x11/starboard_platform.gyp
+++ b/src/starboard/linux/x64x11/starboard_platform.gyp
@@ -133,6 +133,7 @@
'<(DEPTH)/starboard/shared/posix/log_format.cc',
'<(DEPTH)/starboard/shared/posix/log_is_tty.cc',
'<(DEPTH)/starboard/shared/posix/log_raw.cc',
+ '<(DEPTH)/starboard/shared/posix/memory_flush.cc',
'<(DEPTH)/starboard/shared/posix/set_non_blocking_internal.cc',
'<(DEPTH)/starboard/shared/posix/socket_accept.cc',
'<(DEPTH)/starboard/shared/posix/socket_bind.cc',
diff --git a/src/starboard/memory.h b/src/starboard/memory.h
index 81fd4f3..f418c30 100644
--- a/src/starboard/memory.h
+++ b/src/starboard/memory.h
@@ -142,6 +142,14 @@
// and another call to SbMemoryMap(0x1000) returns (void*)0xB000,
// SbMemoryUnmap(0xA000, 0x2000) should free both.
SB_EXPORT bool SbMemoryUnmap(void* virtual_address, int64_t size_bytes);
+
+#if SB_CAN(MAP_EXECUTABLE_MEMORY)
+// Flushes any data in the given virtual address range that is cached locally in
+// the current processor core to physical memory, ensuring that data and
+// instruction caches are cleared. This is required to be called on executable
+// memory that has been written to and might be executed in the future.
+SB_EXPORT void SbMemoryFlush(void* virtual_address, int64_t size_bytes);
+#endif
#endif // SB_HAS(MMAP)
// Gets the stack bounds for the current thread, placing the highest addressable
diff --git a/src/starboard/nplb/blitter_flush_context_test.cc b/src/starboard/nplb/blitter_flush_context_test.cc
index 1095b90..4707e10 100644
--- a/src/starboard/nplb/blitter_flush_context_test.cc
+++ b/src/starboard/nplb/blitter_flush_context_test.cc
@@ -67,6 +67,7 @@
// Check that flush succeeds after some commands have been submitted.
EXPECT_TRUE(SbBlitterFlushContext(context));
+ EXPECT_TRUE(SbBlitterDestroySurface(surface));
EXPECT_TRUE(SbBlitterDestroyContext(context));
EXPECT_TRUE(SbBlitterDestroyDevice(device));
}
diff --git a/src/starboard/nplb/blitter_pixel_tests/tests.cc b/src/starboard/nplb/blitter_pixel_tests/tests.cc
index 30c8a8b..9554da7 100644
--- a/src/starboard/nplb/blitter_pixel_tests/tests.cc
+++ b/src/starboard/nplb/blitter_pixel_tests/tests.cc
@@ -99,6 +99,8 @@
SbBlitterBlitRectToRect(context_, checker_image,
SbBlitterMakeRect(0, 0, GetWidth(), GetHeight()),
SbBlitterMakeRect(0, 0, GetWidth(), GetHeight()));
+
+ SbBlitterDestroySurface(checker_image);
}
#if SB_HAS(BILINEAR_FILTERING_SUPPORT)
@@ -116,6 +118,8 @@
SbBlitterBlitRectToRect(context_, checker_image,
SbBlitterMakeRect(0, 0, 2, 2),
SbBlitterMakeRect(0, 0, GetWidth(), GetHeight()));
+
+ SbBlitterDestroySurface(checker_image);
}
TEST_F(SbBlitterPixelTest, MinifyBlitRectToRect) {
@@ -128,6 +132,8 @@
SbBlitterBlitRectToRect(
context_, checker_image, SbBlitterMakeRect(0, 0, GetWidth(), GetHeight()),
SbBlitterMakeRect(0, 0, GetWidth() / 8, GetHeight() / 8));
+
+ SbBlitterDestroySurface(checker_image);
}
TEST_F(SbBlitterPixelTest, BlitRectToRectPartialSourceRect) {
@@ -141,6 +147,8 @@
context_, checker_image,
SbBlitterMakeRect(GetWidth() / 2, 0, GetWidth() / 2, GetHeight()),
SbBlitterMakeRect(0, 0, GetWidth(), GetHeight()));
+
+ SbBlitterDestroySurface(checker_image);
}
TEST_F(SbBlitterPixelTest, BlitRectToRectTiled) {
@@ -153,6 +161,8 @@
SbBlitterBlitRectToRectTiled(
context_, checker_image, SbBlitterMakeRect(0, 0, GetWidth(), GetHeight()),
SbBlitterMakeRect(0, 0, GetWidth(), GetHeight()));
+
+ SbBlitterDestroySurface(checker_image);
}
TEST_F(SbBlitterPixelTest, BlitRectToRectTiledWithNoTiling) {
@@ -165,6 +175,8 @@
SbBlitterBlitRectToRectTiled(
context_, checker_image, SbBlitterMakeRect(0, 0, GetWidth(), GetHeight()),
SbBlitterMakeRect(0, 0, GetWidth(), GetHeight()));
+
+ SbBlitterDestroySurface(checker_image);
}
TEST_F(SbBlitterPixelTest, BlitRectToRectTiledNegativeOffset) {
@@ -178,6 +190,8 @@
context_, checker_image,
SbBlitterMakeRect(-GetWidth(), -GetHeight(), GetWidth(), GetHeight()),
SbBlitterMakeRect(0, 0, GetWidth(), GetHeight()));
+
+ SbBlitterDestroySurface(checker_image);
}
TEST_F(SbBlitterPixelTest, BlitRectToRectTiledOffCenter) {
@@ -192,6 +206,8 @@
SbBlitterMakeRect(-GetWidth() / 2, -GetHeight() / 2, GetWidth(),
GetHeight()),
SbBlitterMakeRect(0, 0, GetWidth(), GetHeight()));
+
+ SbBlitterDestroySurface(checker_image);
}
void DrawRectsWithBatchBlit(SbBlitterSurface texture,
@@ -225,6 +241,8 @@
SbBlitterSetRenderTarget(context_, render_target_);
DrawRectsWithBatchBlit(checker_image, context_, GetWidth(), GetHeight());
+
+ SbBlitterDestroySurface(checker_image);
}
SbBlitterSurface CreateCheckerImageWithPixelData(SbBlitterDevice device,
@@ -333,6 +351,8 @@
SbBlitterBlitRectToRect(context_, checker_image,
SbBlitterMakeRect(0, 0, GetWidth(), GetHeight()),
SbBlitterMakeRect(0, 0, GetWidth(), GetHeight()));
+
+ SbBlitterDestroySurface(checker_image);
}
TEST_F(SbBlitterPixelTest, BlitRedGreenRectToRectFromPixelData) {
@@ -343,6 +363,8 @@
SbBlitterBlitRectToRect(context_, checker_image,
SbBlitterMakeRect(0, 0, GetWidth(), GetHeight()),
SbBlitterMakeRect(0, 0, GetWidth(), GetHeight()));
+
+ SbBlitterDestroySurface(checker_image);
}
TEST_F(SbBlitterPixelTest, BlitHalfTransparentRectToRectFromPixelData) {
@@ -353,6 +375,8 @@
SbBlitterBlitRectToRect(context_, checker_image,
SbBlitterMakeRect(0, 0, GetWidth(), GetHeight()),
SbBlitterMakeRect(0, 0, GetWidth(), GetHeight()));
+
+ SbBlitterDestroySurface(checker_image);
}
TEST_F(SbBlitterPixelTest, BlitCanPunchThrough) {
@@ -368,6 +392,8 @@
SbBlitterBlitRectToRect(context_, checker_image,
SbBlitterMakeRect(0, 0, GetWidth(), GetHeight()),
SbBlitterMakeRect(0, 0, GetWidth(), GetHeight()));
+
+ SbBlitterDestroySurface(checker_image);
}
TEST_F(SbBlitterPixelTest, BlitCanBlend) {
@@ -383,6 +409,8 @@
SbBlitterBlitRectToRect(context_, checker_image,
SbBlitterMakeRect(0, 0, GetWidth(), GetHeight()),
SbBlitterMakeRect(0, 0, GetWidth(), GetHeight()));
+
+ SbBlitterDestroySurface(checker_image);
}
TEST_F(SbBlitterPixelTest, SimpleAlphaBlitWithNoColorModulation) {
@@ -397,6 +425,8 @@
SbBlitterBlitRectToRect(context_, checker_image,
SbBlitterMakeRect(0, 0, GetWidth(), GetHeight()),
SbBlitterMakeRect(0, 0, GetWidth(), GetHeight()));
+
+ SbBlitterDestroySurface(checker_image);
}
TEST_F(SbBlitterPixelTest, BlendedAlphaBlitWithNoColorModulationOnRed) {
@@ -410,6 +440,8 @@
SbBlitterBlitRectToRect(context_, checker_image,
SbBlitterMakeRect(0, 0, GetWidth(), GetHeight()),
SbBlitterMakeRect(0, 0, GetWidth(), GetHeight()));
+
+ SbBlitterDestroySurface(checker_image);
}
TEST_F(SbBlitterPixelTest, AlphaBlitWithBlueColorModulation) {
@@ -422,6 +454,8 @@
SbBlitterBlitRectToRect(context_, checker_image,
SbBlitterMakeRect(0, 0, GetWidth(), GetHeight()),
SbBlitterMakeRect(0, 0, GetWidth(), GetHeight()));
+
+ SbBlitterDestroySurface(checker_image);
}
TEST_F(SbBlitterPixelTest, BlendedAlphaBlitWithBlueColorModulationOnRed) {
@@ -436,6 +470,8 @@
SbBlitterBlitRectToRect(context_, checker_image,
SbBlitterMakeRect(0, 0, GetWidth(), GetHeight()),
SbBlitterMakeRect(0, 0, GetWidth(), GetHeight()));
+
+ SbBlitterDestroySurface(checker_image);
}
TEST_F(SbBlitterPixelTest, BlendedAlphaBlitWithAlphaColorModulationOnRed) {
@@ -450,6 +486,8 @@
SbBlitterBlitRectToRect(context_, checker_image,
SbBlitterMakeRect(0, 0, GetWidth(), GetHeight()),
SbBlitterMakeRect(0, 0, GetWidth(), GetHeight()));
+
+ SbBlitterDestroySurface(checker_image);
}
TEST_F(SbBlitterPixelTest, BlendedColorBlitWithAlphaColorModulationOnRed) {
@@ -465,6 +503,8 @@
SbBlitterBlitRectToRect(context_, checker_image,
SbBlitterMakeRect(0, 0, GetWidth(), GetHeight()),
SbBlitterMakeRect(0, 0, GetWidth(), GetHeight()));
+
+ SbBlitterDestroySurface(checker_image);
}
TEST_F(SbBlitterPixelTest, BlendedColorBlitWithAlphaColorModulation) {
@@ -478,6 +518,8 @@
SbBlitterBlitRectToRect(context_, checker_image,
SbBlitterMakeRect(0, 0, GetWidth(), GetHeight()),
SbBlitterMakeRect(0, 0, GetWidth(), GetHeight()));
+
+ SbBlitterDestroySurface(checker_image);
}
TEST_F(SbBlitterPixelTest, FillRectColorIsNotPremultiplied) {
@@ -512,6 +554,8 @@
SbBlitterBlitRectToRect(context_, checker_image,
SbBlitterMakeRect(0, 0, GetWidth(), GetHeight()),
SbBlitterMakeRect(0, 0, GetWidth(), GetHeight()));
+
+ SbBlitterDestroySurface(checker_image);
}
TEST_F(SbBlitterPixelTest, ScissoredBlitRectToRectTiled) {
@@ -529,6 +573,8 @@
context_, checker_image,
SbBlitterMakeRect(0, 0, GetWidth() * 2, GetHeight() * 2),
SbBlitterMakeRect(0, 0, GetWidth(), GetHeight()));
+
+ SbBlitterDestroySurface(checker_image);
}
TEST_F(SbBlitterPixelTest, ScissoredBlitRectToRects) {
@@ -543,6 +589,8 @@
SbBlitterSetScissor(
context_, SbBlitterMakeRect(32, 32, GetWidth() - 48, GetHeight() - 48));
DrawRectsWithBatchBlit(checker_image, context_, GetWidth(), GetHeight());
+
+ SbBlitterDestroySurface(checker_image);
}
TEST_F(SbBlitterPixelTest, ScissorResetsWhenSetRenderTargetIsCalled) {
diff --git a/src/starboard/nplb/memory_map_test.cc b/src/starboard/nplb/memory_map_test.cc
index 52cdc1c..96dd551 100644
--- a/src/starboard/nplb/memory_map_test.cc
+++ b/src/starboard/nplb/memory_map_test.cc
@@ -54,7 +54,7 @@
// Map 4x the amount of system memory (sequentially, not at once).
int64_t bytes_mapped = SbSystemGetTotalCPUMemory() / 4;
for (int64_t total_bytes_mapped = 0;
- total_bytes_mapped < SbSystemGetTotalCPUMemory() * 4;
+ total_bytes_mapped < SbSystemGetTotalCPUMemory() * 3;
total_bytes_mapped += bytes_mapped) {
void* memory = SbMemoryMap(bytes_mapped, kSbMemoryMapProtectWrite, "test");
ASSERT_NE(kFailed, memory);
@@ -102,7 +102,8 @@
return x + y;
}
-// It's not clear how portable this test is.
+// This test is known to run on x64, ARM, and MIPS32 with MIPS32 and MIPS16
+// instructions.
TEST(SbMemoryMapTest, CanExecuteMappedMemoryWithExecFlag) {
void* memory = SbMemoryMap(
kSize, kSbMemoryMapProtectReadWrite | kSbMemoryMapProtectExec, "test");
@@ -120,9 +121,19 @@
// cast to a uint* which will be implicitly casted to a void* below.
uint8_t* sum_function_start = reinterpret_cast<uint8_t*>(&sum);
+ // MIPS16 instructions are kept at odd addresses to differentiate them from
+ // MIPS32 instructions. Most other instructions are aligned to at least even
+ // addresses, so this code should do nothing for those architectures.
+ // https://www.linux-mips.org/wiki/MIPS16
+ ptrdiff_t sum_function_offset =
+ sum_function_start -
+ reinterpret_cast<uint8_t*>(
+ reinterpret_cast<intptr_t>(sum_function_start) & ~0x1);
+ sum_function_start -= sum_function_offset;
+
// Get the last address of the page that |sum_function_start| is on.
uint8_t* sum_function_page_end = reinterpret_cast<uint8_t*>(
- (reinterpret_cast<intptr_t>(&sum) / SB_MEMORY_PAGE_SIZE) *
+ (reinterpret_cast<intptr_t>(sum_function_start) / SB_MEMORY_PAGE_SIZE) *
SB_MEMORY_PAGE_SIZE +
SB_MEMORY_PAGE_SIZE);
ASSERT_TRUE(SbMemoryIsAligned(sum_function_page_end, SB_MEMORY_PAGE_SIZE));
@@ -132,8 +143,10 @@
ASSERT_LE(bytes_to_copy, SB_MEMORY_PAGE_SIZE);
SbMemoryCopy(memory, sum_function_start, bytes_to_copy);
- SumFunction mapped_function =
- reinterpret_cast<SumFunction>(reinterpret_cast<char*>(memory));
+ SbMemoryFlush(memory, bytes_to_copy);
+
+ SumFunction mapped_function = reinterpret_cast<SumFunction>(
+ reinterpret_cast<uint8_t*>(memory) + sum_function_offset);
EXPECT_EQ(4, (*mapped_function)(1, 3));
EXPECT_EQ(5, (*mapped_function)(10, -5));
diff --git a/src/starboard/nplb/system_symbolize_test.cc b/src/starboard/nplb/system_symbolize_test.cc
index 57429af..a0de2e2 100644
--- a/src/starboard/nplb/system_symbolize_test.cc
+++ b/src/starboard/nplb/system_symbolize_test.cc
@@ -26,8 +26,9 @@
EXPECT_NE(static_cast<void*>(NULL), stack[0]);
char buffer[1024] = {0};
bool result = SbSystemSymbolize(stack[0], buffer, SB_ARRAY_SIZE_INT(buffer));
- EXPECT_TRUE(result);
- EXPECT_LT(0, SbStringGetLength(buffer));
+ if (result) {
+ EXPECT_LT(0, SbStringGetLength(buffer));
+ }
}
TEST(SbSystemSymbolizeTest, RainyDay) {
diff --git a/src/starboard/player.h b/src/starboard/player.h
index bc19c20..236f3f6 100644
--- a/src/starboard/player.h
+++ b/src/starboard/player.h
@@ -272,10 +272,10 @@
// kSbMediaTypeVideo, and must be NULL otherwise. |sample_drm_info| must be
// provided for encrypted samples, and must be NULL otherwise.
//
-// The lifetime of |video_sample_info| and |sample_drm_info| are not guaranteed
-// past the call to SbPlayerWriteSample, so the implementation must copy any
-// information it wants to retain from those structures synchronously, before it
-// returns.
+// The lifetime of |video_sample_info| and |sample_drm_info| (as well as member
+// |subsample_mapping| contained inside it) are not guaranteed past the call to
+// SbPlayerWriteSample, so the implementation must copy any information it wants
+// to retain from those structures synchronously, before it returns.
SB_EXPORT void SbPlayerWriteSample(
SbPlayer player,
SbMediaType sample_type,
diff --git a/src/starboard/raspi/1/starboard_platform.gyp b/src/starboard/raspi/1/starboard_platform.gyp
index f22110b..6d8c257 100644
--- a/src/starboard/raspi/1/starboard_platform.gyp
+++ b/src/starboard/raspi/1/starboard_platform.gyp
@@ -140,6 +140,7 @@
'<(DEPTH)/starboard/shared/posix/log_format.cc',
'<(DEPTH)/starboard/shared/posix/log_is_tty.cc',
'<(DEPTH)/starboard/shared/posix/log_raw.cc',
+ '<(DEPTH)/starboard/shared/posix/memory_flush.cc',
'<(DEPTH)/starboard/shared/posix/set_non_blocking_internal.cc',
'<(DEPTH)/starboard/shared/posix/socket_accept.cc',
'<(DEPTH)/starboard/shared/posix/socket_bind.cc',
diff --git a/src/starboard/raspi/shared/configuration_public.h b/src/starboard/raspi/shared/configuration_public.h
index 03cb289..942feef 100644
--- a/src/starboard/raspi/shared/configuration_public.h
+++ b/src/starboard/raspi/shared/configuration_public.h
@@ -46,6 +46,9 @@
// Whether the current platform provides the standard header limits.h.
#define SB_HAS_LIMITS_H 1
+// Whether the current platform provides the standard header float.h.
+#define SB_HAS_FLOAT_H 1
+
// Type detection for wchar_t.
#if defined(__WCHAR_MAX__) && \
(__WCHAR_MAX__ == 0x7fffffff || __WCHAR_MAX__ == 0xffffffff)
diff --git a/src/starboard/shared/posix/memory_flush.cc b/src/starboard/shared/posix/memory_flush.cc
new file mode 100644
index 0000000..e45ea16
--- /dev/null
+++ b/src/starboard/shared/posix/memory_flush.cc
@@ -0,0 +1,36 @@
+// Copyright 2016 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "starboard/memory.h"
+
+#include <sys/mman.h>
+
+#include <iomanip>
+
+#include "starboard/log.h"
+
+#if !SB_CAN(MAP_EXECUTABLE_MEMORY)
+#error "You shouldn't implement SbMemoryFlush unless you can map " \
+ "memory pages as executable"
+#endif
+
+void SbMemoryFlush(void* virtual_address, int64_t size_bytes) {
+ char* memory = reinterpret_cast<char*>(virtual_address);
+#if !SB_IS(ARCH_ARM)
+ int result = msync(memory, size_bytes, MS_SYNC);
+ SB_DCHECK(result == 0) << "msync failed: 0x" << std::hex << result << " ("
+ << std::dec << result << "d)";
+#endif
+ __builtin___clear_cache(memory, memory + size_bytes);
+}
diff --git a/src/starboard/shared/signal/suspend_signals.cc b/src/starboard/shared/signal/suspend_signals.cc
index 0ea0f09..a3cbeea 100644
--- a/src/starboard/shared/signal/suspend_signals.cc
+++ b/src/starboard/shared/signal/suspend_signals.cc
@@ -29,6 +29,15 @@
namespace {
+int UnblockSignal(int signal_id) {
+ sigset_t mask;
+ ::sigemptyset(&mask);
+ ::sigaddset(&mask, signal_id);
+
+ sigset_t previous_mask;
+ return ::sigprocmask(SIG_UNBLOCK, &mask, &previous_mask);
+}
+
void SetSignalHandler(int signal_id, SignalHandlerFunction handler) {
struct sigaction action = {0};
@@ -40,8 +49,7 @@
}
void SuspendDone(void* /*context*/) {
- SetSignalHandler(SIGTSTP, SIG_DFL);
- raise(SIGTSTP);
+ raise(SIGSTOP);
}
void Suspend(int signal_id) {
@@ -51,8 +59,7 @@
void Resume(int signal_id) {
LogSignalCaught(signal_id);
- SetSignalHandler(SIGTSTP, &Suspend);
- // TODO: Resume or Unpause based on state.
+ // TODO: Resume or Unpause based on state before suspend?
starboard::Application::Get()->Unpause(NULL, NULL);
}
@@ -60,12 +67,13 @@
void InstallSuspendSignalHandlers() {
SetSignalHandler(SIGTSTP, &Suspend);
+ UnblockSignal(SIGTSTP);
SetSignalHandler(SIGCONT, &Resume);
}
void UninstallSuspendSignalHandlers() {
- SetSignalHandler(SIGTSTP, SIG_DFL);
SetSignalHandler(SIGCONT, SIG_DFL);
+ SetSignalHandler(SIGTSTP, SIG_DFL);
}
} // namespace signal
diff --git a/src/starboard/shared/starboard/player/input_buffer_internal.cc b/src/starboard/shared/starboard/player/input_buffer_internal.cc
index 45a65c9..8431fc2 100644
--- a/src/starboard/shared/starboard/player/input_buffer_internal.cc
+++ b/src/starboard/shared/starboard/player/input_buffer_internal.cc
@@ -49,7 +49,14 @@
video_sample_info_ = *video_sample_info;
}
if (has_drm_info_) {
+ SB_DCHECK(sample_drm_info->subsample_count > 0);
+
+ subsamples_.assign(sample_drm_info->subsample_mapping,
+ sample_drm_info->subsample_mapping +
+ sample_drm_info->subsample_count);
drm_info_ = *sample_drm_info;
+ drm_info_.subsample_mapping =
+ subsamples_.empty() ? NULL : &subsamples_[0];
}
}
@@ -107,6 +114,7 @@
bool has_drm_info_;
SbDrmSampleInfo drm_info_;
std::vector<uint8_t> decrypted_data_;
+ std::vector<SbDrmSubSampleMapping> subsamples_;
SB_DISALLOW_COPY_AND_ASSIGN(ReferenceCountedBuffer);
};
diff --git a/src/starboard/shared/starboard/player/input_buffer_internal.h b/src/starboard/shared/starboard/player/input_buffer_internal.h
index 273f873..5cc7af6 100644
--- a/src/starboard/shared/starboard/player/input_buffer_internal.h
+++ b/src/starboard/shared/starboard/player/input_buffer_internal.h
@@ -15,6 +15,7 @@
#ifndef STARBOARD_SHARED_STARBOARD_PLAYER_INPUT_BUFFER_INTERNAL_H_
#define STARBOARD_SHARED_STARBOARD_PLAYER_INPUT_BUFFER_INTERNAL_H_
+#include "starboard/drm.h"
#include "starboard/media.h"
#include "starboard/player.h"
#include "starboard/shared/internal_only.h"
diff --git a/src/starboard/shared/stub/memory_flush.cc b/src/starboard/shared/stub/memory_flush.cc
new file mode 100644
index 0000000..0ac4dd2
--- /dev/null
+++ b/src/starboard/shared/stub/memory_flush.cc
@@ -0,0 +1,17 @@
+// Copyright 2016 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "starboard/memory.h"
+
+void SbMemoryFlush(void* /*virtual_address*/, int64_t /*size_bytes*/) {}
diff --git a/src/starboard/stub/configuration_public.h b/src/starboard/stub/configuration_public.h
index 5af547f..9a8c64f 100644
--- a/src/starboard/stub/configuration_public.h
+++ b/src/starboard/stub/configuration_public.h
@@ -122,6 +122,9 @@
// Whether the current platform provides the standard header limits.h.
#define SB_HAS_LIMITS_H 1
+// Whether the current platform provides the standard header float.h.
+#define SB_HAS_FLOAT_H 1
+
// Type detection for wchar_t.
#if defined(__WCHAR_MAX__) && \
(__WCHAR_MAX__ == 0x7fffffff || __WCHAR_MAX__ == 0xffffffff)
diff --git a/src/starboard/stub/starboard_platform.gyp b/src/starboard/stub/starboard_platform.gyp
index 707fecd..ed61292 100644
--- a/src/starboard/stub/starboard_platform.gyp
+++ b/src/starboard/stub/starboard_platform.gyp
@@ -89,6 +89,7 @@
'<(DEPTH)/starboard/shared/stub/memory_compare.cc',
'<(DEPTH)/starboard/shared/stub/memory_copy.cc',
'<(DEPTH)/starboard/shared/stub/memory_find_byte.cc',
+ '<(DEPTH)/starboard/shared/stub/memory_flush.cc',
'<(DEPTH)/starboard/shared/stub/memory_free.cc',
'<(DEPTH)/starboard/shared/stub/memory_free_aligned.cc',
'<(DEPTH)/starboard/shared/stub/memory_get_stack_bounds.cc',
diff --git a/src/starboard/types.h b/src/starboard/types.h
index 16418bb..746689a 100644
--- a/src/starboard/types.h
+++ b/src/starboard/types.h
@@ -47,6 +47,10 @@
#include <stdarg.h>
#endif
+#if SB_HAS(FLOAT_H)
+#include <float.h>
+#endif
+
#ifdef __cplusplus
extern "C" {
#endif
@@ -164,6 +168,10 @@
// A value that represents an int that is probably invalid.
#define kSbInvalidInt kSbInt32Min
+#if !SB_HAS(FLOAT_H)
+#define DBL_MANT_DIG 53
+#endif
+
// --- Standard Include Emulation Audits ---------------------------------------
#if !defined(UINT_MAX) || !defined(INT_MIN) || !defined(INT_MAX) || \
diff --git a/src/third_party/flac/flac.h b/src/third_party/flac/flac.h
index 5070317..b10e4ab 100644
--- a/src/third_party/flac/flac.h
+++ b/src/third_party/flac/flac.h
@@ -9,6 +9,15 @@
// This is a shim header to include the right flac headers.
// Use this instead of referencing the flac headers directly.
+#ifdef STARBOARD
+#include "starboard/configuration.h"
+# if SB_IS(BIG_ENDIAN)
+# define WORDS_BIGENDIAN 1
+# else
+# undef WORDS_BIGENDIAN
+# endif
+#endif // STARBOARD
+
#if defined(USE_SYSTEM_FLAC)
#include <FLAC/stream_encoder.h>
#else
diff --git a/src/third_party/flac/include/FLAC/callback.h b/src/third_party/flac/include/FLAC/callback.h
index c954121..2ee135e 100644
--- a/src/third_party/flac/include/FLAC/callback.h
+++ b/src/third_party/flac/include/FLAC/callback.h
@@ -33,7 +33,10 @@
#define FLAC__CALLBACK_H
#include "ordinals.h"
+
+#ifndef STARBOARD
#include <stdlib.h> /* for size_t */
+#endif // STARBOARD
/** \file include/FLAC/callback.h
*
diff --git a/src/third_party/flac/include/FLAC/metadata.h b/src/third_party/flac/include/FLAC/metadata.h
index fff90b0..b3e43db 100644
--- a/src/third_party/flac/include/FLAC/metadata.h
+++ b/src/third_party/flac/include/FLAC/metadata.h
@@ -32,7 +32,10 @@
#ifndef FLAC__METADATA_H
#define FLAC__METADATA_H
+#ifndef STARBOARD
#include <sys/types.h> /* for off_t */
+#endif // STARBOARD
+
#include "export.h"
#include "callback.h"
#include "format.h"
diff --git a/src/third_party/flac/include/FLAC/stream_decoder.h b/src/third_party/flac/include/FLAC/stream_decoder.h
index 9ac1594..00b5012 100644
--- a/src/third_party/flac/include/FLAC/stream_decoder.h
+++ b/src/third_party/flac/include/FLAC/stream_decoder.h
@@ -32,7 +32,10 @@
#ifndef FLAC__STREAM_DECODER_H
#define FLAC__STREAM_DECODER_H
+#ifndef COBALT
#include <stdio.h> /* for FILE */
+#endif // COBALT
+
#include "export.h"
#include "format.h"
@@ -1082,6 +1085,7 @@
void *client_data
);
+#ifndef COBALT
/** Initialize the decoder instance to decode Ogg FLAC streams.
*
* This flavor of initialization sets up the decoder to decode from a
@@ -1346,6 +1350,7 @@
FLAC__StreamDecoderErrorCallback error_callback,
void *client_data
);
+#endif // COBALT
/** Finish the decoding process.
* Flushes the decoding buffer, releases resources, resets the decoder
diff --git a/src/third_party/flac/include/FLAC/stream_encoder.h b/src/third_party/flac/include/FLAC/stream_encoder.h
index dbbbb23..b030bb2 100644
--- a/src/third_party/flac/include/FLAC/stream_encoder.h
+++ b/src/third_party/flac/include/FLAC/stream_encoder.h
@@ -32,7 +32,10 @@
#ifndef FLAC__STREAM_ENCODER_H
#define FLAC__STREAM_ENCODER_H
+#ifndef COBALT
#include <stdio.h> /* for FILE */
+#endif // COBALT
+
#include "export.h"
#include "format.h"
#include "stream_decoder.h"
@@ -1470,6 +1473,7 @@
*/
FLAC_API FLAC__StreamEncoderInitStatus FLAC__stream_encoder_init_stream(FLAC__StreamEncoder *encoder, FLAC__StreamEncoderWriteCallback write_callback, FLAC__StreamEncoderSeekCallback seek_callback, FLAC__StreamEncoderTellCallback tell_callback, FLAC__StreamEncoderMetadataCallback metadata_callback, void *client_data);
+#ifndef COBALT
/** Initialize the encoder instance to encode Ogg FLAC streams.
*
* This flavor of initialization sets up the encoder to encode to a FLAC
@@ -1671,6 +1675,7 @@
* see FLAC__StreamEncoderInitStatus for the meanings of other return values.
*/
FLAC_API FLAC__StreamEncoderInitStatus FLAC__stream_encoder_init_ogg_file(FLAC__StreamEncoder *encoder, const char *filename, FLAC__StreamEncoderProgressCallback progress_callback, void *client_data);
+#endif // COBALT
/** Finish the encoding process.
* Flushes the encoding buffer, releases resources, resets the encoder
diff --git a/src/third_party/flac/include/share/alloc.h b/src/third_party/flac/include/share/alloc.h
index 2a6ac39..a6761c3 100644
--- a/src/third_party/flac/include/share/alloc.h
+++ b/src/third_party/flac/include/share/alloc.h
@@ -5,6 +5,10 @@
#ifndef THIRD_PARTY_FLAC_INCLUDE_SHARE_ALLOC_H
#define THIRD_PARTY_FLAC_INCLUDE_SHARE_ALLOC_H
+#ifdef STARBOARD
+#include "starboard/types.h"
+#endif // STARBOARD
+
void *safe_malloc_(size_t size);
void *safe_calloc_(size_t num_items, size_t size);
diff --git a/src/third_party/flac/src/libFLAC/alloc.c b/src/third_party/flac/src/libFLAC/alloc.c
index c8c30d3..2297adf 100644
--- a/src/third_party/flac/src/libFLAC/alloc.c
+++ b/src/third_party/flac/src/libFLAC/alloc.c
@@ -3,10 +3,15 @@
// found in the LICENSE file.
#include <limits.h>
+
+#ifndef STARBOARD
#if !defined _MSC_VER
#include <stdint.h>
#endif
#include <stdlib.h>
+#endif // STARBOARD
+
+#include "starboard/client_porting/poem/stdio_poem.h"
#include "share/alloc.h"
void *safe_malloc_(size_t size) {
diff --git a/src/third_party/flac/src/libFLAC/bitreader.c b/src/third_party/flac/src/libFLAC/bitreader.c
index 5c47b1a..29d64a0 100644
--- a/src/third_party/flac/src/libFLAC/bitreader.c
+++ b/src/third_party/flac/src/libFLAC/bitreader.c
@@ -33,6 +33,7 @@
# include <config.h>
#endif
+#ifndef STARBOARD
#include <stdlib.h> /* for malloc() */
#include <string.h> /* for memcpy(), memset() */
#if defined(_MSC_VER) && defined(HAVE_WINSOCK_H)
@@ -44,6 +45,11 @@
#else
#include <netinet/in.h> /* for ntohl() */
#endif
+#endif // STARBOARD
+
+#include "starboard/client_porting/poem/stdio_poem.h"
+#include "starboard/client_porting/poem/string_poem.h"
+
#include "private/bitmath.h"
#include "private/bitreader.h"
#include "private/crc.h"
@@ -61,8 +67,11 @@
#if WORDS_BIGENDIAN
#define SWAP_BE_WORD_TO_HOST(x) (x)
#else
-#ifdef _MSC_VER
+#if defined(_MSC_VER) && !defined(COBALT)
#define SWAP_BE_WORD_TO_HOST(x) local_swap32_(x)
+#elif defined(STARBOARD)
+#include "starboard/byte_swap.h"
+#define SWAP_BE_WORD_TO_HOST(x) SB_NET_TO_HOST_U32(x)
#else
#define SWAP_BE_WORD_TO_HOST(x) ntohl(x)
#endif
@@ -149,7 +158,7 @@
FLAC__CPUInfo cpu_info;
};
-#if defined(_MSC_VER) && defined(HAVE_WINSOCK_H)
+#if defined(_MSC_VER) && defined(HAVE_WINSOCK_H) && !defined(COBALT)
/* OPT: an MSVC built-in would be better */
static _inline FLAC__uint32 local_swap32_(FLAC__uint32 x)
{
@@ -263,7 +272,7 @@
#if WORDS_BIGENDIAN
#else
end = (br->words*FLAC__BYTES_PER_WORD + br->bytes + bytes + (FLAC__BYTES_PER_WORD-1)) / FLAC__BYTES_PER_WORD;
-# if defined(_MSC_VER) && (FLAC__BYTES_PER_WORD == 4)
+# if defined(_MSC_VER) && (FLAC__BYTES_PER_WORD == 4) && !defined(COBALT)
if(br->cpu_info.type == FLAC__CPUINFO_TYPE_IA32 && br->cpu_info.data.ia32.bswap) {
start = br->words;
local_swap32_block_(br->buffer + start, end - start);
@@ -361,6 +370,7 @@
return true;
}
+#ifndef COBALT
void FLAC__bitreader_dump(const FLAC__BitReader *br, FILE *out)
{
unsigned i, j;
@@ -390,6 +400,7 @@
}
}
}
+#endif // COBALT
void FLAC__bitreader_reset_read_crc16(FLAC__BitReader *br, FLAC__uint16 seed)
{
diff --git a/src/third_party/flac/src/libFLAC/bitwriter.c b/src/third_party/flac/src/libFLAC/bitwriter.c
index 3b8599c..f1c54cf 100644
--- a/src/third_party/flac/src/libFLAC/bitwriter.c
+++ b/src/third_party/flac/src/libFLAC/bitwriter.c
@@ -33,6 +33,7 @@
# include <config.h>
#endif
+#ifndef STARBOARD
#include <stdlib.h> /* for malloc() */
#include <string.h> /* for memcpy(), memset() */
#if defined(_MSC_VER) && defined(HAVE_WINSOCK_H)
@@ -44,6 +45,11 @@
#else
#include <netinet/in.h> /* for ntohl() */
#endif
+#else // STARBOARD
+#include "starboard/byte_swap.h"
+#include "starboard/client_porting/poem/stdio_poem.h"
+#endif // STARBOARD
+
#if 0 /* UNUSED */
#include "private/bitmath.h"
#endif
@@ -63,8 +69,11 @@
#if WORDS_BIGENDIAN
#define SWAP_BE_WORD_TO_HOST(x) (x)
#else
-#ifdef _MSC_VER
+#if defined(_MSC_VER) && !defined(COBALT)
#define SWAP_BE_WORD_TO_HOST(x) local_swap32_(x)
+#elif defined(STARBOARD)
+#include "starboard/byte_swap.h"
+#define SWAP_BE_WORD_TO_HOST(x) SB_NET_TO_HOST_U32(x)
#else
#define SWAP_BE_WORD_TO_HOST(x) ntohl(x)
#endif
@@ -207,6 +216,7 @@
bw->words = bw->bits = 0;
}
+#ifndef COBALT
void FLAC__bitwriter_dump(const FLAC__BitWriter *bw, FILE *out)
{
unsigned i, j;
@@ -230,6 +240,7 @@
}
}
}
+#endif // COBALT
FLAC__bool FLAC__bitwriter_get_write_crc16(FLAC__BitWriter *bw, FLAC__uint16 *crc)
{
diff --git a/src/third_party/flac/src/libFLAC/cpu.c b/src/third_party/flac/src/libFLAC/cpu.c
index 60b73bf..904ebbd 100644
--- a/src/third_party/flac/src/libFLAC/cpu.c
+++ b/src/third_party/flac/src/libFLAC/cpu.c
@@ -34,6 +34,8 @@
#endif
#include "private/cpu.h"
+
+#ifndef STARBOARD
#include <stdlib.h>
#include <stdio.h>
@@ -54,6 +56,7 @@
# include <signal.h>
# include <setjmp.h>
+#endif // STARBOARD
static sigjmp_buf jmpbuf;
static volatile sig_atomic_t canjump = 0;
@@ -71,13 +74,13 @@
# endif /* FLAC__NO_ASM */
#endif /* FLAC__CPU_PPC */
-#if defined (__NetBSD__) || defined(__OpenBSD__)
+#if (defined (__NetBSD__) || defined(__OpenBSD__)) && !defined(STARBOARD)
#include <sys/param.h>
#include <sys/sysctl.h>
#include <machine/cpu.h>
#endif
-#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__) || defined(__DragonFly__)
+#if (defined(__FreeBSD__) || defined(__FreeBSD_kernel__) || defined(__DragonFly__)) && !defined(STARBOARD)
#include <sys/types.h>
#include <sys/sysctl.h>
#endif
diff --git a/src/third_party/flac/src/libFLAC/fixed.c b/src/third_party/flac/src/libFLAC/fixed.c
index 1a3aac0..733e1e3 100644
--- a/src/third_party/flac/src/libFLAC/fixed.c
+++ b/src/third_party/flac/src/libFLAC/fixed.c
@@ -33,8 +33,14 @@
# include <config.h>
#endif
-#include <math.h>
+#include <math.h> /* for log() */
+
+#ifndef STARBOARD
#include <string.h>
+#endif // STARBOARD
+
+#include "starboard/client_porting/poem/string_poem.h"
+
#include "private/bitmath.h"
#include "private/fixed.h"
#include "FLAC/assert.h"
diff --git a/src/third_party/flac/src/libFLAC/format.c b/src/third_party/flac/src/libFLAC/format.c
index 749461d..faead3c 100644
--- a/src/third_party/flac/src/libFLAC/format.c
+++ b/src/third_party/flac/src/libFLAC/format.c
@@ -33,9 +33,16 @@
# include <config.h>
#endif
+#ifndef STARBOARD
#include <stdio.h>
#include <stdlib.h> /* for qsort() */
#include <string.h> /* for memset() */
+#endif // STARBOARD
+
+#include "starboard/client_porting/poem/stdio_poem.h"
+#include "starboard/client_porting/poem/stdlib_poem.h"
+#include "starboard/client_porting/poem/string_poem.h"
+
#include "FLAC/assert.h"
#include "FLAC/format.h"
#include "private/format.h"
@@ -581,11 +588,11 @@
FLAC__ASSERT(object->capacity_by_order > 0 || (0 == object->parameters && 0 == object->raw_bits));
if(object->capacity_by_order < max_partition_order) {
- if(0 == (object->parameters = (unsigned*)realloc(object->parameters, sizeof(unsigned)*(1 << max_partition_order))))
+ if(0 == (object->parameters = (unsigned*)realloc(object->parameters, sizeof(unsigned)*(1LL << max_partition_order))))
return false;
- if(0 == (object->raw_bits = (unsigned*)realloc(object->raw_bits, sizeof(unsigned)*(1 << max_partition_order))))
+ if(0 == (object->raw_bits = (unsigned*)realloc(object->raw_bits, sizeof(unsigned)*(1LL << max_partition_order))))
return false;
- memset(object->raw_bits, 0, sizeof(unsigned)*(1 << max_partition_order));
+ memset(object->raw_bits, 0, sizeof(unsigned)*(1LL << max_partition_order));
object->capacity_by_order = max_partition_order;
}
diff --git a/src/third_party/flac/src/libFLAC/include/private/bitreader.h b/src/third_party/flac/src/libFLAC/include/private/bitreader.h
index fd0f6aa..b91dc57 100644
--- a/src/third_party/flac/src/libFLAC/include/private/bitreader.h
+++ b/src/third_party/flac/src/libFLAC/include/private/bitreader.h
@@ -32,10 +32,16 @@
#ifndef FLAC__PRIVATE__BITREADER_H
#define FLAC__PRIVATE__BITREADER_H
+#ifndef COBALT
#include <stdio.h> /* for FILE */
+#endif // COBALT
#include "FLAC/ordinals.h"
#include "cpu.h"
+#ifdef STARBOARD
+#include "starboard/types.h"
+#endif // STARBOARD
+
/*
* opaque structure definition
*/
@@ -52,7 +58,9 @@
FLAC__bool FLAC__bitreader_init(FLAC__BitReader *br, FLAC__CPUInfo cpu, FLAC__BitReaderReadCallback rcb, void *cd);
void FLAC__bitreader_free(FLAC__BitReader *br); /* does not 'free(br)' */
FLAC__bool FLAC__bitreader_clear(FLAC__BitReader *br);
+#ifndef COBALT
void FLAC__bitreader_dump(const FLAC__BitReader *br, FILE *out);
+#endif // COBALT
/*
* CRC functions
diff --git a/src/third_party/flac/src/libFLAC/include/private/bitwriter.h b/src/third_party/flac/src/libFLAC/include/private/bitwriter.h
index aa5c4f7..6315f22 100644
--- a/src/third_party/flac/src/libFLAC/include/private/bitwriter.h
+++ b/src/third_party/flac/src/libFLAC/include/private/bitwriter.h
@@ -32,9 +32,15 @@
#ifndef FLAC__PRIVATE__BITWRITER_H
#define FLAC__PRIVATE__BITWRITER_H
+#ifndef COBALT
#include <stdio.h> /* for FILE */
+#endif // COBALT
#include "FLAC/ordinals.h"
+#ifdef STARBOARD
+#include "starboard/types.h"
+#endif // STARBOARD
+
/*
* opaque structure definition
*/
@@ -49,7 +55,9 @@
FLAC__bool FLAC__bitwriter_init(FLAC__BitWriter *bw);
void FLAC__bitwriter_free(FLAC__BitWriter *bw); /* does not 'free(buffer)' */
void FLAC__bitwriter_clear(FLAC__BitWriter *bw);
+#ifndef COBALT
void FLAC__bitwriter_dump(const FLAC__BitWriter *bw, FILE *out);
+#endif // COBALT
/*
* CRC functions
diff --git a/src/third_party/flac/src/libFLAC/include/private/md5.h b/src/third_party/flac/src/libFLAC/include/private/md5.h
index e5f675a..01fbc6b 100644
--- a/src/third_party/flac/src/libFLAC/include/private/md5.h
+++ b/src/third_party/flac/src/libFLAC/include/private/md5.h
@@ -28,6 +28,10 @@
#include "FLAC/ordinals.h"
+#ifdef STARBOARD
+#include "starboard/types.h"
+#endif // STARBOARD
+
typedef struct {
FLAC__uint32 in[16];
FLAC__uint32 buf[4];
diff --git a/src/third_party/flac/src/libFLAC/include/private/memory.h b/src/third_party/flac/src/libFLAC/include/private/memory.h
index 7852c81..a5e0afd 100644
--- a/src/third_party/flac/src/libFLAC/include/private/memory.h
+++ b/src/third_party/flac/src/libFLAC/include/private/memory.h
@@ -36,7 +36,11 @@
#include <config.h>
#endif
+#ifndef STARBOARD
#include <stdlib.h> /* for size_t */
+#else // STARBOARD
+#include "starboard/types.h"
+#endif // STARBOARD
#include "private/float.h"
#include "FLAC/ordinals.h" /* for FLAC__bool */
diff --git a/src/third_party/flac/src/libFLAC/md5.c b/src/third_party/flac/src/libFLAC/md5.c
index 1a60bac..6daa923 100644
--- a/src/third_party/flac/src/libFLAC/md5.c
+++ b/src/third_party/flac/src/libFLAC/md5.c
@@ -2,8 +2,13 @@
# include <config.h>
#endif
+#ifndef STARBOARD
#include <stdlib.h> /* for malloc() */
#include <string.h> /* for memcpy() */
+#endif // STARBOARD
+
+#include "starboard/client_porting/poem/stdio_poem.h"
+#include "starboard/client_porting/poem/string_poem.h"
#include "private/md5.h"
#include "share/alloc.h"
diff --git a/src/third_party/flac/src/libFLAC/memory.c b/src/third_party/flac/src/libFLAC/memory.c
index 4d10097..682c215 100644
--- a/src/third_party/flac/src/libFLAC/memory.c
+++ b/src/third_party/flac/src/libFLAC/memory.c
@@ -37,6 +37,8 @@
#include "FLAC/assert.h"
#include "share/alloc.h"
+#include "starboard/client_porting/poem/stdio_poem.h"
+
void *FLAC__memory_alloc_aligned(size_t bytes, void **aligned_address)
{
void *x;
diff --git a/src/third_party/flac/src/libFLAC/stream_decoder.c b/src/third_party/flac/src/libFLAC/stream_decoder.c
index 7759a70..814cb43 100644
--- a/src/third_party/flac/src/libFLAC/stream_decoder.c
+++ b/src/third_party/flac/src/libFLAC/stream_decoder.c
@@ -33,6 +33,7 @@
# include <config.h>
#endif
+#ifndef STARBOARD
#if defined _MSC_VER || defined __MINGW32__
#include <io.h> /* for _setmode() */
#include <fcntl.h> /* for _O_BINARY */
@@ -46,6 +47,11 @@
#include <string.h> /* for memset/memcpy() */
#include <sys/stat.h> /* for stat() */
#include <sys/types.h> /* for off_t */
+#endif // STARBOARD
+
+#include "starboard/client_porting/poem/stdio_poem.h"
+#include "starboard/client_porting/poem/string_poem.h"
+
#if defined _MSC_VER || defined __BORLANDC__ || defined __MINGW32__
#if _MSC_VER <= 1600 || defined __BORLANDC__ /* @@@ [2G limit] */
#define fseeko fseek
@@ -54,9 +60,6 @@
#define fseeko _fseeki64
#define ftello _ftelli64
#endif
-#elif defined(__LB_SHELL__)
-#define fseeko fseek
-#define ftello ftell
#endif
#include "FLAC/assert.h"
#include "share/alloc.h"
@@ -109,7 +112,9 @@
***********************************************************************/
static void set_defaults_(FLAC__StreamDecoder *decoder);
+#ifndef COBALT
static FILE *get_binary_stdin_(void);
+#endif // COBALT
static FLAC__bool allocate_output_(FLAC__StreamDecoder *decoder, unsigned size, unsigned channels);
static FLAC__bool has_id_filtered_(FLAC__StreamDecoder *decoder, FLAC__byte *id);
static FLAC__bool find_metadata_(FLAC__StreamDecoder *decoder);
@@ -141,11 +146,13 @@
#if FLAC__HAS_OGG
static FLAC__bool seek_to_absolute_sample_ogg_(FLAC__StreamDecoder *decoder, FLAC__uint64 stream_length, FLAC__uint64 target_sample);
#endif
+#ifndef COBALT
static FLAC__StreamDecoderReadStatus file_read_callback_(const FLAC__StreamDecoder *decoder, FLAC__byte buffer[], size_t *bytes, void *client_data);
static FLAC__StreamDecoderSeekStatus file_seek_callback_(const FLAC__StreamDecoder *decoder, FLAC__uint64 absolute_byte_offset, void *client_data);
static FLAC__StreamDecoderTellStatus file_tell_callback_(const FLAC__StreamDecoder *decoder, FLAC__uint64 *absolute_byte_offset, void *client_data);
static FLAC__StreamDecoderLengthStatus file_length_callback_(const FLAC__StreamDecoder *decoder, FLAC__uint64 *stream_length, void *client_data);
static FLAC__bool file_eof_callback_(const FLAC__StreamDecoder *decoder, void *client_data);
+#endif // COBALT
/***********************************************************************
*
@@ -175,7 +182,9 @@
void (*local_lpc_restore_signal_16bit_order8)(const FLAC__int32 residual[], unsigned data_len, const FLAC__int32 qlp_coeff[], unsigned order, int lp_quantization, FLAC__int32 data[]);
FLAC__bool (*local_bitreader_read_rice_signed_block)(FLAC__BitReader *br, int vals[], unsigned nvals, unsigned parameter);
void *client_data;
+#ifndef COBALT
FILE *file; /* only used if FLAC__stream_decoder_init_file()/FLAC__stream_decoder_init_file() called, else NULL */
+#endif // COBALT
FLAC__BitReader *input;
FLAC__int32 *output[FLAC__MAX_CHANNELS];
FLAC__int32 *residual[FLAC__MAX_CHANNELS]; /* WATCHOUT: these are the aligned pointers; the real pointers that should be free()'d are residual_unaligned[] below */
@@ -334,7 +343,9 @@
for(i = 0; i < FLAC__MAX_CHANNELS; i++)
FLAC__format_entropy_coding_method_partitioned_rice_contents_init(&decoder->private_->partitioned_rice_contents[i]);
+#ifndef COBALT
decoder->private_->file = 0;
+#endif // COBALT
set_defaults_(decoder);
@@ -513,6 +524,7 @@
);
}
+#ifndef COBALT
FLAC_API FLAC__StreamDecoderInitStatus FLAC__stream_decoder_init_ogg_stream(
FLAC__StreamDecoder *decoder,
FLAC__StreamDecoderReadCallback read_callback,
@@ -665,6 +677,7 @@
{
return init_file_internal_(decoder, filename, write_callback, metadata_callback, error_callback, client_data, /*is_ogg=*/true);
}
+#endif // COBALT
FLAC_API FLAC__bool FLAC__stream_decoder_finish(FLAC__StreamDecoder *decoder)
{
@@ -713,11 +726,13 @@
FLAC__ogg_decoder_aspect_finish(&decoder->protected_->ogg_decoder_aspect);
#endif
+#ifndef COBALT
if(0 != decoder->private_->file) {
if(decoder->private_->file != stdin)
fclose(decoder->private_->file);
decoder->private_->file = 0;
}
+#endif // COBALT
if(decoder->private_->do_md5_checking) {
if(memcmp(decoder->private_->stream_info.data.stream_info.md5sum, decoder->private_->computed_md5sum, 16))
@@ -1004,8 +1019,10 @@
* not seekable.
*/
if(!decoder->private_->internal_reset_hack) {
+#ifndef COBALT
if(decoder->private_->file == stdin)
return false; /* can't rewind stdin, reset fails */
+#endif // COBALT
if(decoder->private_->seek_callback && decoder->private_->seek_callback(decoder, 0, decoder->private_->client_data) == FLAC__STREAM_DECODER_SEEK_STATUS_ERROR)
return false; /* seekable and seek fails, reset fails */
}
@@ -1283,6 +1300,7 @@
#endif
}
+#ifndef COBALT
/*
* This will forcibly set stdin to binary mode (for OSes that require it)
*/
@@ -1303,6 +1321,7 @@
return stdin;
}
+#endif // COBALT
FLAC__bool allocate_output_(FLAC__StreamDecoder *decoder, unsigned size, unsigned channels)
{
@@ -3326,6 +3345,7 @@
}
#endif
+#ifndef COBALT
FLAC__StreamDecoderReadStatus file_read_callback_(const FLAC__StreamDecoder *decoder, FLAC__byte buffer[], size_t *bytes, void *client_data)
{
(void)client_data;
@@ -3391,3 +3411,4 @@
return feof(decoder->private_->file)? true : false;
}
+#endif // COBALT
diff --git a/src/third_party/flac/src/libFLAC/stream_encoder.c b/src/third_party/flac/src/libFLAC/stream_encoder.c
index b908660..adad610 100644
--- a/src/third_party/flac/src/libFLAC/stream_encoder.c
+++ b/src/third_party/flac/src/libFLAC/stream_encoder.c
@@ -33,6 +33,7 @@
# include <config.h>
#endif
+#ifndef STARBOARD
#if defined _MSC_VER || defined __MINGW32__
#include <io.h> /* for _setmode() */
#include <fcntl.h> /* for _O_BINARY */
@@ -46,6 +47,12 @@
#include <stdlib.h> /* for malloc() */
#include <string.h> /* for memcpy() */
#include <sys/types.h> /* for off_t */
+#endif // STARBOARD
+
+#include "starboard/client_porting/poem/stdlib_poem.h"
+#include "starboard/client_porting/poem/stdio_poem.h"
+#include "starboard/client_porting/poem/string_poem.h"
+
#if defined _MSC_VER || defined __BORLANDC__ || defined __MINGW32__
#if _MSC_VER <= 1600 || defined __BORLANDC__ /* @@@ [2G limit] */
#define fseeko fseek
@@ -54,9 +61,6 @@
#define fseeko _fseeki64
#define ftello _ftelli64
#endif
-#elif defined(__LB_SHELL__)
-#define fseeko fseek
-#define ftello ftell
#endif
#include "FLAC/assert.h"
#include "FLAC/stream_decoder.h"
@@ -322,12 +326,13 @@
static void verify_metadata_callback_(const FLAC__StreamDecoder *decoder, const FLAC__StreamMetadata *metadata, void *client_data);
static void verify_error_callback_(const FLAC__StreamDecoder *decoder, FLAC__StreamDecoderErrorStatus status, void *client_data);
+#ifndef COBALT
static FLAC__StreamEncoderReadStatus file_read_callback_(const FLAC__StreamEncoder *encoder, FLAC__byte buffer[], size_t *bytes, void *client_data);
static FLAC__StreamEncoderSeekStatus file_seek_callback_(const FLAC__StreamEncoder *encoder, FLAC__uint64 absolute_byte_offset, void *client_data);
static FLAC__StreamEncoderTellStatus file_tell_callback_(const FLAC__StreamEncoder *encoder, FLAC__uint64 *absolute_byte_offset, void *client_data);
static FLAC__StreamEncoderWriteStatus file_write_callback_(const FLAC__StreamEncoder *encoder, const FLAC__byte buffer[], size_t bytes, unsigned samples, unsigned current_frame, void *client_data);
static FILE *get_binary_stdout_(void);
-
+#endif // COBALT
/***********************************************************************
*
@@ -401,7 +406,9 @@
FLAC__StreamEncoderProgressCallback progress_callback;
void *client_data;
unsigned first_seekpoint_to_check;
+#ifndef COBALT
FILE *file; /* only used when encoding to a file */
+#endif // COBALT
FLAC__uint64 bytes_written;
FLAC__uint64 samples_written;
unsigned frames_written;
@@ -558,7 +565,9 @@
return 0;
}
+#ifndef COBALT
encoder->private_->file = 0;
+#endif // COBALT
set_defaults_(encoder);
@@ -1165,6 +1174,7 @@
);
}
+#ifndef COBALT
FLAC_API FLAC__StreamEncoderInitStatus FLAC__stream_encoder_init_ogg_stream(
FLAC__StreamEncoder *encoder,
FLAC__StreamEncoderReadCallback read_callback,
@@ -1318,6 +1328,7 @@
{
return init_file_internal_(encoder, filename, progress_callback, client_data, /*is_ogg=*/true);
}
+#endif // COBALT
FLAC_API FLAC__bool FLAC__stream_encoder_finish(FLAC__StreamEncoder *encoder)
{
@@ -1367,11 +1378,13 @@
}
}
+#ifndef COBALT
if(0 != encoder->private_->file) {
if(encoder->private_->file != stdout)
fclose(encoder->private_->file);
encoder->private_->file = 0;
}
+#endif // COBALT
#if FLAC__HAS_OGG
if(encoder->private_->is_ogg)
@@ -3756,9 +3769,9 @@
/* save best parameters and raw_bits */
FLAC__format_entropy_coding_method_partitioned_rice_contents_ensure_size(prc, max(6, best_partition_order));
- memcpy(prc->parameters, private_->partitioned_rice_contents_extra[best_parameters_index].parameters, sizeof(unsigned)*(1<<(best_partition_order)));
+ memcpy(prc->parameters, private_->partitioned_rice_contents_extra[best_parameters_index].parameters, sizeof(unsigned)*(1LL<<(best_partition_order)));
if(do_escape_coding)
- memcpy(prc->raw_bits, private_->partitioned_rice_contents_extra[best_parameters_index].raw_bits, sizeof(unsigned)*(1<<(best_partition_order)));
+ memcpy(prc->raw_bits, private_->partitioned_rice_contents_extra[best_parameters_index].raw_bits, sizeof(unsigned)*(1LL<<(best_partition_order)));
/*
* Now need to check if the type should be changed to
* FLAC__ENTROPY_CODING_METHOD_PARTITIONED_RICE2 based on the
@@ -4259,6 +4272,7 @@
encoder->protected_->state = FLAC__STREAM_ENCODER_VERIFY_DECODER_ERROR;
}
+#ifndef COBALT
FLAC__StreamEncoderReadStatus file_read_callback_(const FLAC__StreamEncoder *encoder, FLAC__byte buffer[], size_t *bytes, void *client_data)
{
(void)client_data;
@@ -4363,3 +4377,4 @@
return stdout;
}
+#endif // COBALT
diff --git a/src/third_party/flac/src/libFLAC/stream_encoder_framing.c b/src/third_party/flac/src/libFLAC/stream_encoder_framing.c
index 939955b..4ab29fe 100644
--- a/src/third_party/flac/src/libFLAC/stream_encoder_framing.c
+++ b/src/third_party/flac/src/libFLAC/stream_encoder_framing.c
@@ -33,8 +33,13 @@
# include <config.h>
#endif
+#ifndef STARBOARD
#include <stdio.h>
#include <string.h> /* for strlen() */
+#endif // STARBOARD
+
+#include "starboard/client_porting/poem/string_poem.h"
+
#include "private/stream_encoder_framing.h"
#include "private/crc.h"
#include "FLAC/assert.h"
diff --git a/src/third_party/freetype2/freetype2_cobalt.gyp b/src/third_party/freetype2/freetype2_cobalt.gyp
index 3da71b5..1ec9f02 100644
--- a/src/third_party/freetype2/freetype2_cobalt.gyp
+++ b/src/third_party/freetype2/freetype2_cobalt.gyp
@@ -1,61 +1,68 @@
-# Copyright (c) 2013 The Chromium Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-{
- 'variables': {
- 'ft2_dir': '<(DEPTH)/third_party/freetype2',
- },
- 'targets': [
- {
- 'target_name': 'freetype2',
- 'type': 'static_library',
- 'toolsets': ['target'],
- 'sources': [
- '<(ft2_dir)/src/autofit/autofit.c',
- '<(ft2_dir)/src/base/ftbase.c',
- '<(ft2_dir)/src/base/ftbbox.c',
- '<(ft2_dir)/src/base/ftbitmap.c',
- '<(ft2_dir)/src/base/ftfntfmt.c',
- '<(ft2_dir)/src/base/ftfstype.c',
- '<(ft2_dir)/src/base/ftgasp.c',
- '<(ft2_dir)/src/base/ftglyph.c',
- '<(ft2_dir)/src/base/ftinit.c',
- '<(ft2_dir)/src/base/ftlcdfil.c',
- '<(ft2_dir)/src/base/ftmm.c',
- '<(ft2_dir)/src/base/ftstroke.c',
- '<(ft2_dir)/src/base/ftsystem.c',
- '<(ft2_dir)/src/base/fttype1.c',
- '<(ft2_dir)/src/cff/cff.c',
- '<(ft2_dir)/src/gzip/ftgzip.c',
- '<(ft2_dir)/src/pshinter/pshinter.c',
- '<(ft2_dir)/src/psnames/psnames.c',
- '<(ft2_dir)/src/raster/raster.c',
- '<(ft2_dir)/src/sfnt/sfnt.c',
- '<(ft2_dir)/src/smooth/smooth.c',
- '<(ft2_dir)/src/truetype/truetype.c',
- ],
- 'defines': [
- 'FT_CONFIG_OPTION_SYSTEM_ZLIB',
- 'FT2_BUILD_LIBRARY',
- 'FT_CONFIG_MODULES_H="ftmodule.h"',
- 'FT_CONFIG_OPTIONS_H="ftoption.h"',
- ],
- 'include_dirs': [
- '<(ft2_dir)/include_cobalt',
- '<(ft2_dir)/include',
- ],
- 'dependencies': [
- '<(DEPTH)/third_party/libpng/libpng.gyp:libpng',
- '<(DEPTH)/third_party/zlib/zlib.gyp:zlib',
- ],
- 'direct_dependent_settings': {
- 'include_dirs': [
- '<(ft2_dir)/include_cobalt',
- '<(ft2_dir)/include',
- ],
- },
- 'msvs_disabled_warnings': [4146],
- },
- ], # targets
-}
+# Copyright (c) 2013 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'variables': {
+ 'ft2_dir': '<(DEPTH)/third_party/freetype2',
+ },
+ 'targets': [
+ {
+ 'target_name': 'freetype2',
+ 'type': 'static_library',
+ 'toolsets': ['target'],
+ 'sources': [
+ '<(ft2_dir)/src/autofit/autofit.c',
+ '<(ft2_dir)/src/base/ftbase.c',
+ '<(ft2_dir)/src/base/ftbbox.c',
+ '<(ft2_dir)/src/base/ftbitmap.c',
+ '<(ft2_dir)/src/base/ftfntfmt.c',
+ '<(ft2_dir)/src/base/ftfstype.c',
+ '<(ft2_dir)/src/base/ftgasp.c',
+ '<(ft2_dir)/src/base/ftglyph.c',
+ '<(ft2_dir)/src/base/ftinit.c',
+ '<(ft2_dir)/src/base/ftlcdfil.c',
+ '<(ft2_dir)/src/base/ftmm.c',
+ '<(ft2_dir)/src/base/ftstroke.c',
+ '<(ft2_dir)/src/base/ftsystem.c',
+ '<(ft2_dir)/src/base/fttype1.c',
+ '<(ft2_dir)/src/cff/cff.c',
+ '<(ft2_dir)/src/gzip/ftgzip.c',
+ '<(ft2_dir)/src/pshinter/pshinter.c',
+ '<(ft2_dir)/src/psnames/psnames.c',
+ '<(ft2_dir)/src/raster/raster.c',
+ '<(ft2_dir)/src/sfnt/sfnt.c',
+ '<(ft2_dir)/src/smooth/smooth.c',
+ '<(ft2_dir)/src/truetype/truetype.c',
+ ],
+ 'defines': [
+ 'FT_CONFIG_OPTION_SYSTEM_ZLIB',
+ 'FT2_BUILD_LIBRARY',
+ 'FT_CONFIG_CONFIG_H="ftconfig.h"',
+ 'FT_CONFIG_MODULES_H="ftmodule.h"',
+ 'FT_CONFIG_OPTIONS_H="ftoption.h"',
+ ],
+ 'include_dirs': [
+ '<(ft2_dir)/include_cobalt',
+ '<(ft2_dir)/include',
+ ],
+ 'dependencies': [
+ '<(DEPTH)/third_party/libpng/libpng.gyp:libpng',
+ '<(DEPTH)/third_party/zlib/zlib.gyp:zlib',
+ ],
+ 'direct_dependent_settings': {
+ 'include_dirs': [
+ '<(ft2_dir)/include_cobalt',
+ '<(ft2_dir)/include',
+ ],
+ 'defines': [
+ 'FT_CONFIG_OPTION_SYSTEM_ZLIB',
+ 'FT_CONFIG_CONFIG_H="ftconfig.h"',
+ 'FT_CONFIG_MODULES_H="ftmodule.h"',
+ 'FT_CONFIG_OPTIONS_H="ftoption.h"',
+ ],
+ },
+ 'msvs_disabled_warnings': [4146],
+ },
+ ], # targets
+}
diff --git a/src/third_party/freetype2/include/freetype/config/ftconfig.h b/src/third_party/freetype2/include/freetype/config/ftconfig.h
index 473272a..e01ee9f 100644
--- a/src/third_party/freetype2/include/freetype/config/ftconfig.h
+++ b/src/third_party/freetype2/include/freetype/config/ftconfig.h
@@ -38,6 +38,10 @@
#ifndef __FTCONFIG_H__
#define __FTCONFIG_H__
+// This file should not be included, as we override ftconfig.h by defining
+// FT_CONFIG_CONFIG_H via GYP, and changing the include order.
+#error "Do not include this file in Cobalt."
+
#include <ft2build.h>
#include FT_CONFIG_OPTIONS_H
#include FT_CONFIG_STANDARD_LIBRARY_H
diff --git a/src/third_party/freetype2/include/freetype/config/ftmodule.h b/src/third_party/freetype2/include/freetype/config/ftmodule.h
index cea4569..719f03a 100644
--- a/src/third_party/freetype2/include/freetype/config/ftmodule.h
+++ b/src/third_party/freetype2/include/freetype/config/ftmodule.h
@@ -10,6 +10,11 @@
*
*/
+// This file should not be included, as we override ftmodule.h by defining
+// FT_CONFIG_MODULES_H via GYP, and changing the include order.
+
+#error "Do not include this file in Cobalt."
+
FT_USE_MODULE( FT_Module_Class, autofit_module_class )
FT_USE_MODULE( FT_Driver_ClassRec, tt_driver_class )
FT_USE_MODULE( FT_Driver_ClassRec, t1_driver_class )
diff --git a/src/third_party/freetype2/include/freetype/config/ftoption.h b/src/third_party/freetype2/include/freetype/config/ftoption.h
index bbfd896..dc6fa85 100644
--- a/src/third_party/freetype2/include/freetype/config/ftoption.h
+++ b/src/third_party/freetype2/include/freetype/config/ftoption.h
@@ -15,6 +15,9 @@
/* */
/***************************************************************************/
+// This file should not be included, as we override ftoption.h by defining
+// FT_CONFIG_OPTIONS_H via GYP, and changing the include order.
+#error "Do not include this file in Cobalt."
#ifndef __FTOPTION_H__
#define __FTOPTION_H__
diff --git a/src/third_party/freetype2/include_cobalt/ftconfig.h b/src/third_party/freetype2/include_cobalt/ftconfig.h
new file mode 100644
index 0000000..bc624e9
--- /dev/null
+++ b/src/third_party/freetype2/include_cobalt/ftconfig.h
@@ -0,0 +1,447 @@
+/***************************************************************************/
+/* */
+/* ftconfig.h */
+/* */
+/* ANSI-specific configuration file (specification only). */
+/* */
+/* Copyright 1996-2015 by */
+/* David Turner, Robert Wilhelm, and Werner Lemberg. */
+/* */
+/* This file is part of the FreeType project, and may only be used, */
+/* modified, and distributed under the terms of the FreeType project */
+/* license, LICENSE.TXT. By continuing to use, modify, or distribute */
+/* this file you indicate that you have read the license and */
+/* understand and accept it fully. */
+/* */
+/***************************************************************************/
+
+/*************************************************************************/
+/* */
+/* This header file contains a number of macro definitions that are used */
+/* by the rest of the engine. Most of the macros here are automatically */
+/* determined at compile time, and you should not need to change it to */
+/* port FreeType, except to compile the library with a non-ANSI */
+/* compiler. */
+/* */
+/* Note however that if some specific modifications are needed, we */
+/* advise you to place a modified copy in your build directory. */
+/* */
+/* The build directory is usually `builds/<system>', and contains */
+/* system-specific files that are always included first when building */
+/* the library. */
+/* */
+/* This ANSI version should stay in `include/config/'. */
+/* */
+/*************************************************************************/
+
+// This file is mostly unchanged, but it is specified here so that we can
+// set FT_CONFIG_OPTIONS_H to "ftoption.h" and the right file will be included.
+
+#ifndef __FTCONFIG_H__
+#define __FTCONFIG_H__
+
+#include <ft2build.h>
+#include FT_CONFIG_OPTIONS_H
+#include FT_CONFIG_STANDARD_LIBRARY_H
+
+FT_BEGIN_HEADER
+
+/*************************************************************************/
+/* */
+/* PLATFORM-SPECIFIC CONFIGURATION MACROS */
+/* */
+/* These macros can be toggled to suit a specific system. The current */
+/* ones are defaults used to compile FreeType in an ANSI C environment */
+/* (16bit compilers are also supported). Copy this file to your own */
+/* `builds/<system>' directory, and edit it to port the engine. */
+/* */
+/*************************************************************************/
+
+/* There are systems (like the Texas Instruments 'C54x) where a `char' */
+/* has 16 bits. ANSI C says that sizeof(char) is always 1. Since an */
+/* `int' has 16 bits also for this system, sizeof(int) gives 1 which */
+/* is probably unexpected. */
+/* */
+/* `CHAR_BIT' (defined in limits.h) gives the number of bits in a */
+/* `char' type. */
+
+#ifndef FT_CHAR_BIT
+#define FT_CHAR_BIT CHAR_BIT
+#endif
+
+/* The size of an `int' type. */
+#if FT_UINT_MAX == 0xFFFFUL
+#define FT_SIZEOF_INT (16 / FT_CHAR_BIT)
+#elif FT_UINT_MAX == 0xFFFFFFFFUL
+#define FT_SIZEOF_INT (32 / FT_CHAR_BIT)
+#elif FT_UINT_MAX > 0xFFFFFFFFUL && FT_UINT_MAX == 0xFFFFFFFFFFFFFFFFUL
+#define FT_SIZEOF_INT (64 / FT_CHAR_BIT)
+#else
+#error "Unsupported size of `int' type!"
+#endif
+
+/* The size of a `long' type. A five-byte `long' (as used e.g. on the */
+/* DM642) is recognized but avoided. */
+#if FT_ULONG_MAX == 0xFFFFFFFFUL
+#define FT_SIZEOF_LONG (32 / FT_CHAR_BIT)
+#elif FT_ULONG_MAX > 0xFFFFFFFFUL && FT_ULONG_MAX == 0xFFFFFFFFFFUL
+#define FT_SIZEOF_LONG (32 / FT_CHAR_BIT)
+#elif FT_ULONG_MAX > 0xFFFFFFFFUL && FT_ULONG_MAX == 0xFFFFFFFFFFFFFFFFUL
+#define FT_SIZEOF_LONG (64 / FT_CHAR_BIT)
+#else
+#error "Unsupported size of `long' type!"
+#endif
+
+/* FT_UNUSED is a macro used to indicate that a given parameter is not */
+/* used -- this is only used to get rid of unpleasant compiler warnings */
+#ifndef FT_UNUSED
+#define FT_UNUSED(arg) ((arg) = (arg))
+#endif
+
+/*************************************************************************/
+/* */
+/* AUTOMATIC CONFIGURATION MACROS */
+/* */
+/* These macros are computed from the ones defined above. Don't touch */
+/* their definition, unless you know precisely what you are doing. No */
+/* porter should need to mess with them. */
+/* */
+/*************************************************************************/
+
+/*************************************************************************/
+/* */
+/* Mac support */
+/* */
+/* This is the only necessary change, so it is defined here instead */
+/* providing a new configuration file. */
+/* */
+#if defined(STARBOARD)
+/* Nothing. */
+#elif defined(__APPLE__) || (defined(__MWERKS__) && defined(macintosh))
+/* no Carbon frameworks for 64bit 10.4.x */
+/* AvailabilityMacros.h is available since Mac OS X 10.2, */
+/* so guess the system version by maximum errno before inclusion */
+#include <errno.h>
+#ifdef ECANCELED /* defined since 10.2 */
+#include "AvailabilityMacros.h"
+#endif
+#if defined(__LP64__) && \
+ (MAC_OS_X_VERSION_MIN_REQUIRED <= MAC_OS_X_VERSION_10_4)
+#undef FT_MACINTOSH
+#endif
+
+#elif defined(__SC__) || defined(__MRC__)
+/* Classic MacOS compilers */
+#include "ConditionalMacros.h"
+#if TARGET_OS_MAC
+#define FT_MACINTOSH 1
+#endif
+
+#endif
+
+/*************************************************************************/
+/* */
+/* <Section> */
+/* basic_types */
+/* */
+/*************************************************************************/
+
+/*************************************************************************/
+/* */
+/* <Type> */
+/* FT_Int16 */
+/* */
+/* <Description> */
+/* A typedef for a 16bit signed integer type. */
+/* */
+typedef signed short FT_Int16;
+
+/*************************************************************************/
+/* */
+/* <Type> */
+/* FT_UInt16 */
+/* */
+/* <Description> */
+/* A typedef for a 16bit unsigned integer type. */
+/* */
+typedef unsigned short FT_UInt16;
+
+/* */
+
+/* this #if 0 ... #endif clause is for documentation purposes */
+#if 0
+
+ /*************************************************************************/
+ /* */
+ /* <Type> */
+ /* FT_Int32 */
+ /* */
+ /* <Description> */
+ /* A typedef for a 32bit signed integer type. The size depends on */
+ /* the configuration. */
+ /* */
+ typedef signed XXX FT_Int32;
+
+
+ /*************************************************************************/
+ /* */
+ /* <Type> */
+ /* FT_UInt32 */
+ /* */
+ /* A typedef for a 32bit unsigned integer type. The size depends on */
+ /* the configuration. */
+ /* */
+ typedef unsigned XXX FT_UInt32;
+
+
+ /*************************************************************************/
+ /* */
+ /* <Type> */
+ /* FT_Int64 */
+ /* */
+ /* A typedef for a 64bit signed integer type. The size depends on */
+ /* the configuration. Only defined if there is real 64bit support; */
+ /* otherwise, it gets emulated with a structure (if necessary). */
+ /* */
+ typedef signed XXX FT_Int64;
+
+
+ /*************************************************************************/
+ /* */
+ /* <Type> */
+ /* FT_UInt64 */
+ /* */
+ /* A typedef for a 64bit unsigned integer type. The size depends on */
+ /* the configuration. Only defined if there is real 64bit support; */
+ /* otherwise, it gets emulated with a structure (if necessary). */
+ /* */
+ typedef unsigned XXX FT_UInt64;
+
+ /* */
+
+#endif
+
+#if FT_SIZEOF_INT == (32 / FT_CHAR_BIT)
+
+typedef signed int FT_Int32;
+typedef unsigned int FT_UInt32;
+
+#elif FT_SIZEOF_LONG == (32 / FT_CHAR_BIT)
+
+typedef signed long FT_Int32;
+typedef unsigned long FT_UInt32;
+
+#else
+#error "no 32bit type found -- please check your configuration files"
+#endif
+
+/* look up an integer type that is at least 32 bits */
+#if FT_SIZEOF_INT >= (32 / FT_CHAR_BIT)
+
+typedef int FT_Fast;
+typedef unsigned int FT_UFast;
+
+#elif FT_SIZEOF_LONG >= (32 / FT_CHAR_BIT)
+
+typedef long FT_Fast;
+typedef unsigned long FT_UFast;
+
+#endif
+
+/* determine whether we have a 64-bit int type for platforms without */
+/* Autoconf */
+#if FT_SIZEOF_LONG == (64 / FT_CHAR_BIT)
+
+/* FT_LONG64 must be defined if a 64-bit type is available */
+#define FT_LONG64
+#define FT_INT64 long
+#define FT_UINT64 unsigned long
+
+/*************************************************************************/
+/* */
+/* A 64-bit data type may create compilation problems if you compile */
+/* in strict ANSI mode. To avoid them, we disable other 64-bit data */
+/* types if __STDC__ is defined. You can however ignore this rule */
+/* by defining the FT_CONFIG_OPTION_FORCE_INT64 configuration macro. */
+/* */
+#elif !defined(__STDC__) || defined(FT_CONFIG_OPTION_FORCE_INT64)
+
+#if defined(_MSC_VER) && _MSC_VER >= 900 /* Visual C++ (and Intel C++) */
+
+/* this compiler provides the __int64 type */
+#define FT_LONG64
+#define FT_INT64 __int64
+#define FT_UINT64 unsigned __int64
+
+#elif defined(__BORLANDC__) /* Borland C++ */
+
+/* XXXX: We should probably check the value of __BORLANDC__ in order */
+/* to test the compiler version. */
+
+/* this compiler provides the __int64 type */
+#define FT_LONG64
+#define FT_INT64 __int64
+#define FT_UINT64 unsigned __int64
+
+#elif defined(__WATCOMC__) /* Watcom C++ */
+
+/* Watcom doesn't provide 64-bit data types */
+
+#elif defined(__MWERKS__) /* Metrowerks CodeWarrior */
+
+#define FT_LONG64
+#define FT_INT64 long long int
+#define FT_UINT64 unsigned long long int
+
+#elif defined(__GNUC__)
+
+/* GCC provides the `long long' type */
+#define FT_LONG64
+#define FT_INT64 long long int
+#define FT_UINT64 unsigned long long int
+
+#endif /* _MSC_VER */
+
+#endif /* FT_SIZEOF_LONG == (64 / FT_CHAR_BIT) */
+
+#ifdef FT_LONG64
+typedef FT_INT64 FT_Int64;
+typedef FT_UINT64 FT_UInt64;
+#endif
+
+/*************************************************************************/
+/* */
+/* miscellaneous */
+/* */
+/*************************************************************************/
+
+#define FT_BEGIN_STMNT do {
+#define FT_END_STMNT \
+ } \
+ while (0)
+#define FT_DUMMY_STMNT FT_BEGIN_STMNT FT_END_STMNT
+
+/* typeof condition taken from gnulib's `intprops.h' header file */
+#if (__GNUC__ >= 2 || defined(__IBM__TYPEOF__) || \
+ (__SUNPRO_C >= 0x5110 && !__STDC__))
+#define FT_TYPEOF(type) (__typeof__(type))
+#else
+#define FT_TYPEOF(type) /* empty */
+#endif
+
+#ifdef FT_MAKE_OPTION_SINGLE_OBJECT
+
+#define FT_LOCAL(x) static x
+#define FT_LOCAL_DEF(x) static x
+
+#else
+
+#ifdef __cplusplus
+#define FT_LOCAL(x) extern "C" x
+#define FT_LOCAL_DEF(x) extern "C" x
+#else
+#define FT_LOCAL(x) extern x
+#define FT_LOCAL_DEF(x) x
+#endif
+
+#endif /* FT_MAKE_OPTION_SINGLE_OBJECT */
+
+#define FT_LOCAL_ARRAY(x) extern const x
+#define FT_LOCAL_ARRAY_DEF(x) const x
+
+#ifndef FT_BASE
+
+#ifdef __cplusplus
+#define FT_BASE(x) extern "C" x
+#else
+#define FT_BASE(x) extern x
+#endif
+
+#endif /* !FT_BASE */
+
+#ifndef FT_BASE_DEF
+
+#ifdef __cplusplus
+#define FT_BASE_DEF(x) x
+#else
+#define FT_BASE_DEF(x) x
+#endif
+
+#endif /* !FT_BASE_DEF */
+
+#ifndef FT_EXPORT
+
+#ifdef __cplusplus
+#define FT_EXPORT(x) extern "C" x
+#else
+#define FT_EXPORT(x) extern x
+#endif
+
+#endif /* !FT_EXPORT */
+
+#ifndef FT_EXPORT_DEF
+
+#ifdef __cplusplus
+#define FT_EXPORT_DEF(x) extern "C" x
+#else
+#define FT_EXPORT_DEF(x) extern x
+#endif
+
+#endif /* !FT_EXPORT_DEF */
+
+#ifndef FT_EXPORT_VAR
+
+#ifdef __cplusplus
+#define FT_EXPORT_VAR(x) extern "C" x
+#else
+#define FT_EXPORT_VAR(x) extern x
+#endif
+
+#endif /* !FT_EXPORT_VAR */
+
+/* The following macros are needed to compile the library with a */
+/* C++ compiler and with 16bit compilers. */
+/* */
+
+/* This is special. Within C++, you must specify `extern "C"' for */
+/* functions which are used via function pointers, and you also */
+/* must do that for structures which contain function pointers to */
+/* assure C linkage -- it's not possible to have (local) anonymous */
+/* functions which are accessed by (global) function pointers. */
+/* */
+/* */
+/* FT_CALLBACK_DEF is used to _define_ a callback function. */
+/* */
+/* FT_CALLBACK_TABLE is used to _declare_ a constant variable that */
+/* contains pointers to callback functions. */
+/* */
+/* FT_CALLBACK_TABLE_DEF is used to _define_ a constant variable */
+/* that contains pointers to callback functions. */
+/* */
+/* */
+/* Some 16bit compilers have to redefine these macros to insert */
+/* the infamous `_cdecl' or `__fastcall' declarations. */
+/* */
+#ifndef FT_CALLBACK_DEF
+#ifdef __cplusplus
+#define FT_CALLBACK_DEF(x) extern "C" x
+#else
+#define FT_CALLBACK_DEF(x) static x
+#endif
+#endif /* FT_CALLBACK_DEF */
+
+#ifndef FT_CALLBACK_TABLE
+#ifdef __cplusplus
+#define FT_CALLBACK_TABLE extern "C"
+#define FT_CALLBACK_TABLE_DEF extern "C"
+#else
+#define FT_CALLBACK_TABLE extern
+#define FT_CALLBACK_TABLE_DEF /* nothing */
+#endif
+#endif /* FT_CALLBACK_TABLE */
+
+FT_END_HEADER
+
+#endif /* __FTCONFIG_H__ */
+
+/* END */
diff --git a/src/third_party/freetype2/include_cobalt/ftoption.h b/src/third_party/freetype2/include_cobalt/ftoption.h
index 6f38e6d..36f93fc 100644
--- a/src/third_party/freetype2/include_cobalt/ftoption.h
+++ b/src/third_party/freetype2/include_cobalt/ftoption.h
@@ -1,886 +1,849 @@
-/***************************************************************************/
-/* */
-/* ftoption.h */
-/* */
-/* User-selectable configuration macros (specification only). */
-/* */
-/* Copyright 1996-2015 by */
-/* David Turner, Robert Wilhelm, and Werner Lemberg. */
-/* */
-/* This file is part of the FreeType project, and may only be used, */
-/* modified, and distributed under the terms of the FreeType project */
-/* license, LICENSE.TXT. By continuing to use, modify, or distribute */
-/* this file you indicate that you have read the license and */
-/* understand and accept it fully. */
-/* */
-/***************************************************************************/
-
-
-#ifndef COBALT___FTOPTION_H__
-#define COBALT___FTOPTION_H__
-
-
-#include <ft2build.h>
-
-
-FT_BEGIN_HEADER
-
- /*************************************************************************/
- /* */
- /* USER-SELECTABLE CONFIGURATION MACROS */
- /* */
- /* This file contains the default configuration macro definitions for */
- /* a standard build of the FreeType library. There are three ways to */
- /* use this file to build project-specific versions of the library: */
- /* */
- /* - You can modify this file by hand, but this is not recommended in */
- /* cases where you would like to build several versions of the */
- /* library from a single source directory. */
- /* */
- /* - You can put a copy of this file in your build directory, more */
- /* precisely in `$BUILD/config/ftoption.h', where `$BUILD' is the */
- /* name of a directory that is included _before_ the FreeType include */
- /* path during compilation. */
- /* */
- /* The default FreeType Makefiles and Jamfiles use the build */
- /* directory `builds/<system>' by default, but you can easily change */
- /* that for your own projects. */
- /* */
- /* - Copy the file <ft2build.h> to `$BUILD/ft2build.h' and modify it */
- /* slightly to pre-define the macro FT_CONFIG_OPTIONS_H used to */
- /* locate this file during the build. For example, */
- /* */
- /* #define FT_CONFIG_OPTIONS_H <myftoptions.h> */
- /* #include <config/ftheader.h> */
- /* */
- /* will use `$BUILD/myftoptions.h' instead of this file for macro */
- /* definitions. */
- /* */
- /* Note also that you can similarly pre-define the macro */
- /* FT_CONFIG_MODULES_H used to locate the file listing of the modules */
- /* that are statically linked to the library at compile time. By */
- /* default, this file is <config/ftmodule.h>. */
- /* */
- /* We highly recommend using the third method whenever possible. */
- /* */
- /*************************************************************************/
-
-
- /*************************************************************************/
- /*************************************************************************/
- /**** ****/
- /**** G E N E R A L F R E E T Y P E 2 C O N F I G U R A T I O N ****/
- /**** ****/
- /*************************************************************************/
- /*************************************************************************/
-
-
- /*************************************************************************/
- /* */
- /* Uncomment the line below if you want to activate sub-pixel rendering */
- /* (a.k.a. LCD rendering, or ClearType) in this build of the library. */
- /* */
- /* Note that this feature is covered by several Microsoft patents */
- /* and should not be activated in any default build of the library. */
- /* */
- /* This macro has no impact on the FreeType API, only on its */
- /* _implementation_. For example, using FT_RENDER_MODE_LCD when calling */
- /* FT_Render_Glyph still generates a bitmap that is 3 times wider than */
- /* the original size in case this macro isn't defined; however, each */
- /* triplet of subpixels has R=G=B. */
- /* */
- /* This is done to allow FreeType clients to run unmodified, forcing */
- /* them to display normal gray-level anti-aliased glyphs. */
- /* */
-/* #define FT_CONFIG_OPTION_SUBPIXEL_RENDERING */
-
-
- /*************************************************************************/
- /* */
- /* Many compilers provide a non-ANSI 64-bit data type that can be used */
- /* by FreeType to speed up some computations. However, this will create */
- /* some problems when compiling the library in strict ANSI mode. */
- /* */
- /* For this reason, the use of 64-bit integers is normally disabled when */
- /* the __STDC__ macro is defined. You can however disable this by */
- /* defining the macro FT_CONFIG_OPTION_FORCE_INT64 here. */
- /* */
- /* For most compilers, this will only create compilation warnings when */
- /* building the library. */
- /* */
- /* ObNote: The compiler-specific 64-bit integers are detected in the */
- /* file `ftconfig.h' either statically or through the */
- /* `configure' script on supported platforms. */
- /* */
-#undef FT_CONFIG_OPTION_FORCE_INT64
-
-
- /*************************************************************************/
- /* */
- /* If this macro is defined, do not try to use an assembler version of */
- /* performance-critical functions (e.g. FT_MulFix). You should only do */
- /* that to verify that the assembler function works properly, or to */
- /* execute benchmark tests of the various implementations. */
-/* #define FT_CONFIG_OPTION_NO_ASSEMBLER */
-
-
- /*************************************************************************/
- /* */
- /* If this macro is defined, try to use an inlined assembler version of */
- /* the `FT_MulFix' function, which is a `hotspot' when loading and */
- /* hinting glyphs, and which should be executed as fast as possible. */
- /* */
- /* Note that if your compiler or CPU is not supported, this will default */
- /* to the standard and portable implementation found in `ftcalc.c'. */
- /* */
-#define FT_CONFIG_OPTION_INLINE_MULFIX
-
-
- /*************************************************************************/
- /* */
- /* LZW-compressed file support. */
- /* */
- /* FreeType now handles font files that have been compressed with the */
- /* `compress' program. This is mostly used to parse many of the PCF */
- /* files that come with various X11 distributions. The implementation */
- /* uses NetBSD's `zopen' to partially uncompress the file on the fly */
- /* (see src/lzw/ftgzip.c). */
- /* */
- /* Define this macro if you want to enable this `feature'. */
- /* */
-#define FT_CONFIG_OPTION_USE_LZW
-
-
- /*************************************************************************/
- /* */
- /* Gzip-compressed file support. */
- /* */
- /* FreeType now handles font files that have been compressed with the */
- /* `gzip' program. This is mostly used to parse many of the PCF files */
- /* that come with XFree86. The implementation uses `zlib' to */
- /* partially uncompress the file on the fly (see src/gzip/ftgzip.c). */
- /* */
- /* Define this macro if you want to enable this `feature'. See also */
- /* the macro FT_CONFIG_OPTION_SYSTEM_ZLIB below. */
- /* */
-#define FT_CONFIG_OPTION_USE_ZLIB
-
-
- /*************************************************************************/
- /* */
- /* ZLib library selection */
- /* */
- /* This macro is only used when FT_CONFIG_OPTION_USE_ZLIB is defined. */
- /* It allows FreeType's `ftgzip' component to link to the system's */
- /* installation of the ZLib library. This is useful on systems like */
- /* Unix or VMS where it generally is already available. */
- /* */
- /* If you let it undefined, the component will use its own copy */
- /* of the zlib sources instead. These have been modified to be */
- /* included directly within the component and *not* export external */
- /* function names. This allows you to link any program with FreeType */
- /* _and_ ZLib without linking conflicts. */
- /* */
- /* Do not #undef this macro here since the build system might define */
- /* it for certain configurations only. */
- /* */
-/* #define FT_CONFIG_OPTION_SYSTEM_ZLIB */
-
-
- /*************************************************************************/
- /* */
- /* Bzip2-compressed file support. */
- /* */
- /* FreeType now handles font files that have been compressed with the */
- /* `bzip2' program. This is mostly used to parse many of the PCF */
- /* files that come with XFree86. The implementation uses `libbz2' to */
- /* partially uncompress the file on the fly (see src/bzip2/ftbzip2.c). */
- /* Contrary to gzip, bzip2 currently is not included and need to use */
- /* the system available bzip2 implementation. */
- /* */
- /* Define this macro if you want to enable this `feature'. */
- /* */
-/* #define FT_CONFIG_OPTION_USE_BZIP2 */
-
-
- /*************************************************************************/
- /* */
- /* Define to disable the use of file stream functions and types, FILE, */
- /* fopen() etc. Enables the use of smaller system libraries on embedded */
- /* systems that have multiple system libraries, some with or without */
- /* file stream support, in the cases where file stream support is not */
- /* necessary such as memory loading of font files. */
- /* */
-/* #define FT_CONFIG_OPTION_DISABLE_STREAM_SUPPORT */
-
-
- /*************************************************************************/
- /* */
- /* PNG bitmap support. */
- /* */
- /* FreeType now handles loading color bitmap glyphs in the PNG format. */
- /* This requires help from the external libpng library. Uncompressed */
- /* color bitmaps do not need any external libraries and will be */
- /* supported regardless of this configuration. */
- /* */
- /* Define this macro if you want to enable this `feature'. */
- /* */
-#define FT_CONFIG_OPTION_USE_PNG
-
-
- /*************************************************************************/
- /* */
- /* HarfBuzz support. */
- /* */
- /* FreeType uses the HarfBuzz library to improve auto-hinting of */
- /* OpenType fonts. If available, many glyphs not directly addressable */
- /* by a font's character map will be hinted also. */
- /* */
- /* Define this macro if you want to enable this `feature'. */
- /* */
-/* #define FT_CONFIG_OPTION_USE_HARFBUZZ */
-
-
- /*************************************************************************/
- /* */
- /* DLL export compilation */
- /* */
- /* When compiling FreeType as a DLL, some systems/compilers need a */
- /* special keyword in front OR after the return type of function */
- /* declarations. */
- /* */
- /* Two macros are used within the FreeType source code to define */
- /* exported library functions: FT_EXPORT and FT_EXPORT_DEF. */
- /* */
- /* FT_EXPORT( return_type ) */
- /* */
- /* is used in a function declaration, as in */
- /* */
- /* FT_EXPORT( FT_Error ) */
- /* FT_Init_FreeType( FT_Library* alibrary ); */
- /* */
- /* */
- /* FT_EXPORT_DEF( return_type ) */
- /* */
- /* is used in a function definition, as in */
- /* */
- /* FT_EXPORT_DEF( FT_Error ) */
- /* FT_Init_FreeType( FT_Library* alibrary ) */
- /* { */
- /* ... some code ... */
- /* return FT_Err_Ok; */
- /* } */
- /* */
- /* You can provide your own implementation of FT_EXPORT and */
- /* FT_EXPORT_DEF here if you want. If you leave them undefined, they */
- /* will be later automatically defined as `extern return_type' to */
- /* allow normal compilation. */
- /* */
- /* Do not #undef these macros here since the build system might define */
- /* them for certain configurations only. */
- /* */
-/* #define FT_EXPORT(x) extern x */
-/* #define FT_EXPORT_DEF(x) x */
-
-
- /*************************************************************************/
- /* */
- /* Glyph Postscript Names handling */
- /* */
- /* By default, FreeType 2 is compiled with the `psnames' module. This */
- /* module is in charge of converting a glyph name string into a */
- /* Unicode value, or return a Macintosh standard glyph name for the */
- /* use with the TrueType `post' table. */
- /* */
- /* Undefine this macro if you do not want `psnames' compiled in your */
- /* build of FreeType. This has the following effects: */
- /* */
- /* - The TrueType driver will provide its own set of glyph names, */
- /* if you build it to support postscript names in the TrueType */
- /* `post' table. */
- /* */
- /* - The Type 1 driver will not be able to synthesize a Unicode */
- /* charmap out of the glyphs found in the fonts. */
- /* */
- /* You would normally undefine this configuration macro when building */
- /* a version of FreeType that doesn't contain a Type 1 or CFF driver. */
- /* */
-#define FT_CONFIG_OPTION_POSTSCRIPT_NAMES
-
-
- /*************************************************************************/
- /* */
- /* Postscript Names to Unicode Values support */
- /* */
- /* By default, FreeType 2 is built with the `PSNames' module compiled */
- /* in. Among other things, the module is used to convert a glyph name */
- /* into a Unicode value. This is especially useful in order to */
- /* synthesize on the fly a Unicode charmap from the CFF/Type 1 driver */
- /* through a big table named the `Adobe Glyph List' (AGL). */
- /* */
- /* Undefine this macro if you do not want the Adobe Glyph List */
- /* compiled in your `PSNames' module. The Type 1 driver will not be */
- /* able to synthesize a Unicode charmap out of the glyphs found in the */
- /* fonts. */
- /* */
-#define FT_CONFIG_OPTION_ADOBE_GLYPH_LIST
-
-
- /*************************************************************************/
- /* */
- /* Support for Mac fonts */
- /* */
- /* Define this macro if you want support for outline fonts in Mac */
- /* format (mac dfont, mac resource, macbinary containing a mac */
- /* resource) on non-Mac platforms. */
- /* */
- /* Note that the `FOND' resource isn't checked. */
- /* */
-#define FT_CONFIG_OPTION_MAC_FONTS
-
-
- /*************************************************************************/
- /* */
- /* Guessing methods to access embedded resource forks */
- /* */
- /* Enable extra Mac fonts support on non-Mac platforms (e.g. */
- /* GNU/Linux). */
- /* */
- /* Resource forks which include fonts data are stored sometimes in */
- /* locations which users or developers don't expected. In some cases, */
- /* resource forks start with some offset from the head of a file. In */
- /* other cases, the actual resource fork is stored in file different */
- /* from what the user specifies. If this option is activated, */
- /* FreeType tries to guess whether such offsets or different file */
- /* names must be used. */
- /* */
- /* Note that normal, direct access of resource forks is controlled via */
- /* the FT_CONFIG_OPTION_MAC_FONTS option. */
- /* */
-#ifdef FT_CONFIG_OPTION_MAC_FONTS
-#define FT_CONFIG_OPTION_GUESSING_EMBEDDED_RFORK
-#endif
-
-
- /*************************************************************************/
- /* */
- /* Allow the use of FT_Incremental_Interface to load typefaces that */
- /* contain no glyph data, but supply it via a callback function. */
- /* This is required by clients supporting document formats which */
- /* supply font data incrementally as the document is parsed, such */
- /* as the Ghostscript interpreter for the PostScript language. */
- /* */
-/* #define FT_CONFIG_OPTION_INCREMENTAL */
-
-
- /*************************************************************************/
- /* */
- /* The size in bytes of the render pool used by the scan-line converter */
- /* to do all of its work. */
- /* */
-#define FT_RENDER_POOL_SIZE 16384L
-
-
- /*************************************************************************/
- /* */
- /* FT_MAX_MODULES */
- /* */
- /* The maximum number of modules that can be registered in a single */
- /* FreeType library object. 32 is the default. */
- /* */
-#define FT_MAX_MODULES 32
-
-
- /*************************************************************************/
- /* */
- /* Debug level */
- /* */
- /* FreeType can be compiled in debug or trace mode. In debug mode, */
- /* errors are reported through the `ftdebug' component. In trace */
- /* mode, additional messages are sent to the standard output during */
- /* execution. */
- /* */
- /* Define FT_DEBUG_LEVEL_ERROR to build the library in debug mode. */
- /* Define FT_DEBUG_LEVEL_TRACE to build it in trace mode. */
- /* */
- /* Don't define any of these macros to compile in `release' mode! */
- /* */
- /* Do not #undef these macros here since the build system might define */
- /* them for certain configurations only. */
- /* */
-/* #define FT_DEBUG_LEVEL_ERROR */
-/* #define FT_DEBUG_LEVEL_TRACE */
-
-
- /*************************************************************************/
- /* */
- /* Autofitter debugging */
- /* */
- /* If FT_DEBUG_AUTOFIT is defined, FreeType provides some means to */
- /* control the autofitter behaviour for debugging purposes with global */
- /* boolean variables (consequently, you should *never* enable this */
- /* while compiling in `release' mode): */
- /* */
- /* _af_debug_disable_horz_hints */
- /* _af_debug_disable_vert_hints */
- /* _af_debug_disable_blue_hints */
- /* */
- /* Additionally, the following functions provide dumps of various */
- /* internal autofit structures to stdout (using `printf'): */
- /* */
- /* af_glyph_hints_dump_points */
- /* af_glyph_hints_dump_segments */
- /* af_glyph_hints_dump_edges */
- /* af_glyph_hints_get_num_segments */
- /* af_glyph_hints_get_segment_offset */
- /* */
- /* As an argument, they use another global variable: */
- /* */
- /* _af_debug_hints */
- /* */
- /* Please have a look at the `ftgrid' demo program to see how those */
- /* variables and macros should be used. */
- /* */
- /* Do not #undef these macros here since the build system might define */
- /* them for certain configurations only. */
- /* */
-/* #define FT_DEBUG_AUTOFIT */
-
-
- /*************************************************************************/
- /* */
- /* Memory Debugging */
- /* */
- /* FreeType now comes with an integrated memory debugger that is */
- /* capable of detecting simple errors like memory leaks or double */
- /* deletes. To compile it within your build of the library, you */
- /* should define FT_DEBUG_MEMORY here. */
- /* */
- /* Note that the memory debugger is only activated at runtime when */
- /* when the _environment_ variable `FT2_DEBUG_MEMORY' is defined also! */
- /* */
- /* Do not #undef this macro here since the build system might define */
- /* it for certain configurations only. */
- /* */
-/* #define FT_DEBUG_MEMORY */
-
-
- /*************************************************************************/
- /* */
- /* Module errors */
- /* */
- /* If this macro is set (which is _not_ the default), the higher byte */
- /* of an error code gives the module in which the error has occurred, */
- /* while the lower byte is the real error code. */
- /* */
- /* Setting this macro makes sense for debugging purposes only, since */
- /* it would break source compatibility of certain programs that use */
- /* FreeType 2. */
- /* */
- /* More details can be found in the files ftmoderr.h and fterrors.h. */
- /* */
-#undef FT_CONFIG_OPTION_USE_MODULE_ERRORS
-
-
- /*************************************************************************/
- /* */
- /* Position Independent Code */
- /* */
- /* If this macro is set (which is _not_ the default), FreeType2 will */
- /* avoid creating constants that require address fixups. Instead the */
- /* constants will be moved into a struct and additional intialization */
- /* code will be used. */
- /* */
- /* Setting this macro is needed for systems that prohibit address */
- /* fixups, such as BREW. */
- /* */
-/* #define FT_CONFIG_OPTION_PIC */
-
-
- /*************************************************************************/
- /*************************************************************************/
- /**** ****/
- /**** S F N T D R I V E R C O N F I G U R A T I O N ****/
- /**** ****/
- /*************************************************************************/
- /*************************************************************************/
-
-
- /*************************************************************************/
- /* */
- /* Define TT_CONFIG_OPTION_EMBEDDED_BITMAPS if you want to support */
- /* embedded bitmaps in all formats using the SFNT module (namely */
- /* TrueType & OpenType). */
- /* */
-#define TT_CONFIG_OPTION_EMBEDDED_BITMAPS
-
-
- /*************************************************************************/
- /* */
- /* Define TT_CONFIG_OPTION_POSTSCRIPT_NAMES if you want to be able to */
- /* load and enumerate the glyph Postscript names in a TrueType or */
- /* OpenType file. */
- /* */
- /* Note that when you do not compile the `PSNames' module by undefining */
- /* the above FT_CONFIG_OPTION_POSTSCRIPT_NAMES, the `sfnt' module will */
- /* contain additional code used to read the PS Names table from a font. */
- /* */
- /* (By default, the module uses `PSNames' to extract glyph names.) */
- /* */
-#define TT_CONFIG_OPTION_POSTSCRIPT_NAMES
-
-
- /*************************************************************************/
- /* */
- /* Define TT_CONFIG_OPTION_SFNT_NAMES if your applications need to */
- /* access the internal name table in a SFNT-based format like TrueType */
- /* or OpenType. The name table contains various strings used to */
- /* describe the font, like family name, copyright, version, etc. It */
- /* does not contain any glyph name though. */
- /* */
- /* Accessing SFNT names is done through the functions declared in */
- /* `ftsnames.h'. */
- /* */
-#define TT_CONFIG_OPTION_SFNT_NAMES
-
-
- /*************************************************************************/
- /* */
- /* TrueType CMap support */
- /* */
- /* Here you can fine-tune which TrueType CMap table format shall be */
- /* supported. */
-#define TT_CONFIG_CMAP_FORMAT_0
-#define TT_CONFIG_CMAP_FORMAT_2
-#define TT_CONFIG_CMAP_FORMAT_4
-#define TT_CONFIG_CMAP_FORMAT_6
-#define TT_CONFIG_CMAP_FORMAT_8
-#define TT_CONFIG_CMAP_FORMAT_10
-#define TT_CONFIG_CMAP_FORMAT_12
-#define TT_CONFIG_CMAP_FORMAT_13
-#define TT_CONFIG_CMAP_FORMAT_14
-
-
- /*************************************************************************/
- /*************************************************************************/
- /**** ****/
- /**** T R U E T Y P E D R I V E R C O N F I G U R A T I O N ****/
- /**** ****/
- /*************************************************************************/
- /*************************************************************************/
-
- /*************************************************************************/
- /* */
- /* Define TT_CONFIG_OPTION_BYTECODE_INTERPRETER if you want to compile */
- /* a bytecode interpreter in the TrueType driver. */
- /* */
- /* By undefining this, you will only compile the code necessary to load */
- /* TrueType glyphs without hinting. */
- /* */
- /* Do not #undef this macro here, since the build system might */
- /* define it for certain configurations only. */
- /* */
-#define TT_CONFIG_OPTION_BYTECODE_INTERPRETER
-
-
- /*************************************************************************/
- /* */
- /* Define TT_CONFIG_OPTION_SUBPIXEL_HINTING if you want to compile */
- /* EXPERIMENTAL subpixel hinting support into the TrueType driver. This */
- /* replaces the native TrueType hinting mechanism when anything but */
- /* FT_RENDER_MODE_MONO is requested. */
- /* */
- /* Enabling this causes the TrueType driver to ignore instructions under */
- /* certain conditions. This is done in accordance with the guide here, */
- /* with some minor differences: */
- /* */
- /* http://www.microsoft.com/typography/cleartype/truetypecleartype.aspx */
- /* */
- /* By undefining this, you only compile the code necessary to hint */
- /* TrueType glyphs with native TT hinting. */
- /* */
- /* This option requires TT_CONFIG_OPTION_BYTECODE_INTERPRETER to be */
- /* defined. */
- /* */
-/* #define TT_CONFIG_OPTION_SUBPIXEL_HINTING */
-
-
- /*************************************************************************/
- /* */
- /* If you define TT_CONFIG_OPTION_UNPATENTED_HINTING, a special version */
- /* of the TrueType bytecode interpreter is used that doesn't implement */
- /* any of the patented opcodes and algorithms. The patents related to */
- /* TrueType hinting have expired worldwide since May 2010; this option */
- /* is now deprecated. */
- /* */
- /* Note that the TT_CONFIG_OPTION_UNPATENTED_HINTING macro is *ignored* */
- /* if you define TT_CONFIG_OPTION_BYTECODE_INTERPRETER; in other words, */
- /* either define TT_CONFIG_OPTION_BYTECODE_INTERPRETER or */
- /* TT_CONFIG_OPTION_UNPATENTED_HINTING but not both at the same time. */
- /* */
- /* This macro is only useful for a small number of font files (mostly */
- /* for Asian scripts) that require bytecode interpretation to properly */
- /* load glyphs. For all other fonts, this produces unpleasant results, */
- /* thus the unpatented interpreter is never used to load glyphs from */
- /* TrueType fonts unless one of the following two options is used. */
- /* */
- /* - The unpatented interpreter is explicitly activated by the user */
- /* through the FT_PARAM_TAG_UNPATENTED_HINTING parameter tag */
- /* when opening the FT_Face. */
- /* */
- /* - FreeType detects that the FT_Face corresponds to one of the */
- /* `trick' fonts (e.g., `Mingliu') it knows about. The font engine */
- /* contains a hard-coded list of font names and other matching */
- /* parameters (see function `tt_face_init' in file */
- /* `src/truetype/ttobjs.c'). */
- /* */
- /* Here a sample code snippet for using FT_PARAM_TAG_UNPATENTED_HINTING. */
- /* */
- /* { */
- /* FT_Parameter parameter; */
- /* FT_Open_Args open_args; */
- /* */
- /* */
- /* parameter.tag = FT_PARAM_TAG_UNPATENTED_HINTING; */
- /* */
- /* open_args.flags = FT_OPEN_PATHNAME | FT_OPEN_PARAMS; */
- /* open_args.pathname = my_font_pathname; */
- /* open_args.num_params = 1; */
- /*     open_args.params     = &parameter;                             */
- /* */
- /* error = FT_Open_Face( library, &open_args, index, &face ); */
- /* ... */
- /* } */
- /* */
-/* #define TT_CONFIG_OPTION_UNPATENTED_HINTING */
-
-
- /*************************************************************************/
- /* */
- /* Define TT_CONFIG_OPTION_COMPONENT_OFFSET_SCALED to compile the */
- /* TrueType glyph loader to use Apple's definition of how to handle */
- /* component offsets in composite glyphs. */
- /* */
- /* Apple and MS disagree on the default behavior of component offsets */
- /* in composites. Apple says that they should be scaled by the scaling */
- /* factors in the transformation matrix (roughly, it's more complex) */
- /* while MS says they should not. OpenType defines two bits in the */
- /* composite flags array which can be used to disambiguate, but old */
- /* fonts will not have them. */
- /* */
- /* http://www.microsoft.com/typography/otspec/glyf.htm */
- /* https://developer.apple.com/fonts/TrueType-Reference-Manual/RM06/Chap6glyf.html */
- /* */
-#undef TT_CONFIG_OPTION_COMPONENT_OFFSET_SCALED
-
-
- /*************************************************************************/
- /* */
- /* Define TT_CONFIG_OPTION_GX_VAR_SUPPORT if you want to include */
- /* support for Apple's distortable font technology (fvar, gvar, cvar, */
- /* and avar tables). This has many similarities to Type 1 Multiple */
- /* Masters support. */
- /* */
-#define TT_CONFIG_OPTION_GX_VAR_SUPPORT
-
-
- /*************************************************************************/
- /* */
- /* Define TT_CONFIG_OPTION_BDF if you want to include support for */
- /* an embedded `BDF ' table within SFNT-based bitmap formats. */
- /* */
-#define TT_CONFIG_OPTION_BDF
-
-
- /*************************************************************************/
- /*************************************************************************/
- /**** ****/
- /**** T Y P E 1 D R I V E R C O N F I G U R A T I O N ****/
- /**** ****/
- /*************************************************************************/
- /*************************************************************************/
-
-
- /*************************************************************************/
- /* */
- /* T1_MAX_DICT_DEPTH is the maximum depth of nest dictionaries and */
- /* arrays in the Type 1 stream (see t1load.c). A minimum of 4 is */
- /* required. */
- /* */
-#define T1_MAX_DICT_DEPTH 5
-
-
- /*************************************************************************/
- /* */
- /* T1_MAX_SUBRS_CALLS details the maximum number of nested sub-routine */
- /* calls during glyph loading. */
- /* */
-#define T1_MAX_SUBRS_CALLS 16
-
-
- /*************************************************************************/
- /* */
- /* T1_MAX_CHARSTRING_OPERANDS is the charstring stack's capacity. A */
- /* minimum of 16 is required. */
- /* */
- /* The Chinese font MingTiEG-Medium (CNS 11643 character set) needs 256. */
- /* */
-#define T1_MAX_CHARSTRINGS_OPERANDS 256
-
-
- /*************************************************************************/
- /* */
- /* Define this configuration macro if you want to prevent the */
- /* compilation of `t1afm', which is in charge of reading Type 1 AFM */
- /* files into an existing face. Note that if set, the T1 driver will be */
- /* unable to produce kerning distances. */
- /* */
-#undef T1_CONFIG_OPTION_NO_AFM
-
-
- /*************************************************************************/
- /* */
- /* Define this configuration macro if you want to prevent the */
- /* compilation of the Multiple Masters font support in the Type 1 */
- /* driver. */
- /* */
-#undef T1_CONFIG_OPTION_NO_MM_SUPPORT
-
-
- /*************************************************************************/
- /*************************************************************************/
- /**** ****/
- /**** C F F D R I V E R C O N F I G U R A T I O N ****/
- /**** ****/
- /*************************************************************************/
- /*************************************************************************/
-
-
- /*************************************************************************/
- /* */
- /* Using CFF_CONFIG_OPTION_DARKENING_PARAMETER_{X,Y}{1,2,3,4} it is */
- /* possible to set up the default values of the four control points that */
- /* define the stem darkening behaviour of the (new) CFF engine. For */
- /* more details please read the documentation of the */
- /* `darkening-parameters' property of the cff driver module (file */
- /* `ftcffdrv.h'), which allows the control at run-time. */
- /* */
- /* Do *not* undefine these macros! */
- /* */
-#define CFF_CONFIG_OPTION_DARKENING_PARAMETER_X1 500
-#define CFF_CONFIG_OPTION_DARKENING_PARAMETER_Y1 400
-
-#define CFF_CONFIG_OPTION_DARKENING_PARAMETER_X2 1000
-#define CFF_CONFIG_OPTION_DARKENING_PARAMETER_Y2 275
-
-#define CFF_CONFIG_OPTION_DARKENING_PARAMETER_X3 1667
-#define CFF_CONFIG_OPTION_DARKENING_PARAMETER_Y3 275
-
-#define CFF_CONFIG_OPTION_DARKENING_PARAMETER_X4 2333
-#define CFF_CONFIG_OPTION_DARKENING_PARAMETER_Y4 0
-
-
- /*************************************************************************/
- /* */
- /* CFF_CONFIG_OPTION_OLD_ENGINE controls whether the pre-Adobe CFF */
- /* engine gets compiled into FreeType. If defined, it is possible to */
- /* switch between the two engines using the `hinting-engine' property of */
- /* the cff driver module. */
- /* */
-/* #define CFF_CONFIG_OPTION_OLD_ENGINE */
-
-
- /*************************************************************************/
- /*************************************************************************/
- /**** ****/
- /**** A U T O F I T M O D U L E C O N F I G U R A T I O N ****/
- /**** ****/
- /*************************************************************************/
- /*************************************************************************/
-
-
- /*************************************************************************/
- /* */
- /* Compile autofit module with CJK (Chinese, Japanese, Korean) script */
- /* support. */
- /* */
-#define AF_CONFIG_OPTION_CJK
-
- /*************************************************************************/
- /* */
- /* Compile autofit module with Indic script support. */
- /* */
-#define AF_CONFIG_OPTION_INDIC
-
- /*************************************************************************/
- /* */
- /* Compile autofit module with warp hinting. The idea of the warping */
- /* code is to slightly scale and shift a glyph within a single dimension */
- /* so that as much of its segments are aligned (more or less) on the */
- /* grid. To find out the optimal scaling and shifting value, various */
- /* parameter combinations are tried and scored. */
- /* */
- /* This experimental option is active only if the rendering mode is */
- /* FT_RENDER_MODE_LIGHT; you can switch warping on and off with the */
- /* `warping' property of the auto-hinter (see file `ftautoh.h' for more */
- /* information; by default it is switched off). */
- /* */
-/*#define AF_CONFIG_OPTION_USE_WARPER*/
-
- /* */
-
-
- /*
- * This macro is obsolete. Support has been removed in FreeType
- * version 2.5.
- */
-/* #define FT_CONFIG_OPTION_OLD_INTERNALS */
-
-
- /*
- * This macro is defined if either unpatented or native TrueType
- * hinting is requested by the definitions above.
- */
-#ifdef TT_CONFIG_OPTION_BYTECODE_INTERPRETER
-#define TT_USE_BYTECODE_INTERPRETER
-#undef TT_CONFIG_OPTION_UNPATENTED_HINTING
-#elif defined TT_CONFIG_OPTION_UNPATENTED_HINTING
-#define TT_USE_BYTECODE_INTERPRETER
-#endif
-
-
- /*
- * Check CFF darkening parameters. The checks are the same as in function
- * `cff_property_set' in file `cffdrivr.c'.
- */
-#if CFF_CONFIG_OPTION_DARKENING_PARAMETER_X1 < 0 || \
- CFF_CONFIG_OPTION_DARKENING_PARAMETER_X2 < 0 || \
- CFF_CONFIG_OPTION_DARKENING_PARAMETER_X3 < 0 || \
- CFF_CONFIG_OPTION_DARKENING_PARAMETER_X4 < 0 || \
- \
- CFF_CONFIG_OPTION_DARKENING_PARAMETER_Y1 < 0 || \
- CFF_CONFIG_OPTION_DARKENING_PARAMETER_Y2 < 0 || \
- CFF_CONFIG_OPTION_DARKENING_PARAMETER_Y3 < 0 || \
- CFF_CONFIG_OPTION_DARKENING_PARAMETER_Y4 < 0 || \
- \
- CFF_CONFIG_OPTION_DARKENING_PARAMETER_X1 > \
- CFF_CONFIG_OPTION_DARKENING_PARAMETER_X2 || \
- CFF_CONFIG_OPTION_DARKENING_PARAMETER_X2 > \
- CFF_CONFIG_OPTION_DARKENING_PARAMETER_X3 || \
- CFF_CONFIG_OPTION_DARKENING_PARAMETER_X3 > \
- CFF_CONFIG_OPTION_DARKENING_PARAMETER_X4 || \
- \
- CFF_CONFIG_OPTION_DARKENING_PARAMETER_Y1 > 500 || \
- CFF_CONFIG_OPTION_DARKENING_PARAMETER_Y2 > 500 || \
- CFF_CONFIG_OPTION_DARKENING_PARAMETER_Y3 > 500 || \
- CFF_CONFIG_OPTION_DARKENING_PARAMETER_Y4 > 500
-#error "Invalid CFF darkening parameters!"
-#endif
-
-FT_END_HEADER
-
-
-#endif /* COBALT___FTOPTION_H__ */
-
-
-/* END */
+/***************************************************************************/
+/* */
+/* ftoption.h */
+/* */
+/* User-selectable configuration macros (specification only). */
+/* */
+/* Copyright 1996-2015 by */
+/* David Turner, Robert Wilhelm, and Werner Lemberg. */
+/* */
+/* This file is part of the FreeType project, and may only be used, */
+/* modified, and distributed under the terms of the FreeType project */
+/* license, LICENSE.TXT. By continuing to use, modify, or distribute */
+/* this file you indicate that you have read the license and */
+/* understand and accept it fully. */
+/* */
+/***************************************************************************/
+
+#ifndef COBALT___FTOPTION_H__
+#define COBALT___FTOPTION_H__
+
+#include <ft2build.h>
+
+FT_BEGIN_HEADER
+
+/*************************************************************************/
+/* */
+/* USER-SELECTABLE CONFIGURATION MACROS */
+/* */
+/* This file contains the default configuration macro definitions for */
+/* a standard build of the FreeType library. There are three ways to */
+/* use this file to build project-specific versions of the library: */
+/* */
+/* - You can modify this file by hand, but this is not recommended in */
+/* cases where you would like to build several versions of the */
+/* library from a single source directory. */
+/* */
+/* - You can put a copy of this file in your build directory, more */
+/* precisely in `$BUILD/config/ftoption.h', where `$BUILD' is the */
+/* name of a directory that is included _before_ the FreeType include */
+/* path during compilation. */
+/* */
+/* The default FreeType Makefiles and Jamfiles use the build */
+/* directory `builds/<system>' by default, but you can easily change */
+/* that for your own projects. */
+/* */
+/* - Copy the file <ft2build.h> to `$BUILD/ft2build.h' and modify it */
+/* slightly to pre-define the macro FT_CONFIG_OPTIONS_H used to */
+/* locate this file during the build. For example, */
+/* */
+/* #define FT_CONFIG_OPTIONS_H <myftoptions.h> */
+/* #include <config/ftheader.h> */
+/* */
+/* will use `$BUILD/myftoptions.h' instead of this file for macro */
+/* definitions. */
+/* */
+/* Note also that you can similarly pre-define the macro */
+/* FT_CONFIG_MODULES_H used to locate the file listing of the modules */
+/* that are statically linked to the library at compile time. By */
+/* default, this file is <config/ftmodule.h>. */
+/* */
+/* We highly recommend using the third method whenever possible. */
+/* */
+/*************************************************************************/
+
+/*************************************************************************/
+/*************************************************************************/
+/**** ****/
+/**** G E N E R A L F R E E T Y P E 2 C O N F I G U R A T I O N ****/
+/**** ****/
+/*************************************************************************/
+/*************************************************************************/
+
+/*************************************************************************/
+/* */
+/* Uncomment the line below if you want to activate sub-pixel rendering */
+/* (a.k.a. LCD rendering, or ClearType) in this build of the library. */
+/* */
+/* Note that this feature is covered by several Microsoft patents */
+/* and should not be activated in any default build of the library. */
+/* */
+/* This macro has no impact on the FreeType API, only on its */
+/* _implementation_. For example, using FT_RENDER_MODE_LCD when calling */
+/* FT_Render_Glyph still generates a bitmap that is 3 times wider than */
+/* the original size in case this macro isn't defined; however, each */
+/* triplet of subpixels has R=G=B. */
+/* */
+/* This is done to allow FreeType clients to run unmodified, forcing */
+/* them to display normal gray-level anti-aliased glyphs. */
+/* */
+/* #define FT_CONFIG_OPTION_SUBPIXEL_RENDERING */
+
+/*************************************************************************/
+/* */
+/* Many compilers provide a non-ANSI 64-bit data type that can be used */
+/* by FreeType to speed up some computations. However, this will create */
+/* some problems when compiling the library in strict ANSI mode. */
+/* */
+/* For this reason, the use of 64-bit integers is normally disabled when */
+/* the __STDC__ macro is defined. You can however disable this by */
+/* defining the macro FT_CONFIG_OPTION_FORCE_INT64 here. */
+/* */
+/* For most compilers, this will only create compilation warnings when */
+/* building the library. */
+/* */
+/* ObNote: The compiler-specific 64-bit integers are detected in the */
+/* file `ftconfig.h' either statically or through the */
+/* `configure' script on supported platforms. */
+/* */
+#undef FT_CONFIG_OPTION_FORCE_INT64
+
+/*************************************************************************/
+/* */
+/* If this macro is defined, do not try to use an assembler version of */
+/* performance-critical functions (e.g. FT_MulFix). You should only do */
+/* that to verify that the assembler function works properly, or to */
+/* execute benchmark tests of the various implementations. */
+/* #define FT_CONFIG_OPTION_NO_ASSEMBLER */
+
+/*************************************************************************/
+/* */
+/* If this macro is defined, try to use an inlined assembler version of */
+/* the `FT_MulFix' function, which is a `hotspot' when loading and */
+/* hinting glyphs, and which should be executed as fast as possible. */
+/* */
+/* Note that if your compiler or CPU is not supported, this will default */
+/* to the standard and portable implementation found in `ftcalc.c'. */
+/* */
+#define FT_CONFIG_OPTION_INLINE_MULFIX
+
+/*************************************************************************/
+/* */
+/* LZW-compressed file support. */
+/* */
+/* FreeType now handles font files that have been compressed with the */
+/* `compress' program. This is mostly used to parse many of the PCF */
+/* files that come with various X11 distributions. The implementation */
+/* uses NetBSD's `zopen' to partially uncompress the file on the fly */
+/* (see src/lzw/ftgzip.c). */
+/* */
+/* Define this macro if you want to enable this `feature'. */
+/* */
+#define FT_CONFIG_OPTION_USE_LZW
+
+/*************************************************************************/
+/* */
+/* Gzip-compressed file support. */
+/* */
+/* FreeType now handles font files that have been compressed with the */
+/* `gzip' program. This is mostly used to parse many of the PCF files */
+/* that come with XFree86. The implementation uses `zlib' to */
+/* partially uncompress the file on the fly (see src/gzip/ftgzip.c). */
+/* */
+/* Define this macro if you want to enable this `feature'. See also */
+/* the macro FT_CONFIG_OPTION_SYSTEM_ZLIB below. */
+/* */
+#define FT_CONFIG_OPTION_USE_ZLIB
+
+/*************************************************************************/
+/* */
+/* ZLib library selection */
+/* */
+/* This macro is only used when FT_CONFIG_OPTION_USE_ZLIB is defined. */
+/* It allows FreeType's `ftgzip' component to link to the system's */
+/* installation of the ZLib library. This is useful on systems like */
+/* Unix or VMS where it generally is already available. */
+/* */
+/* If you let it undefined, the component will use its own copy */
+/* of the zlib sources instead. These have been modified to be */
+/* included directly within the component and *not* export external */
+/* function names. This allows you to link any program with FreeType */
+/* _and_ ZLib without linking conflicts. */
+/* */
+/* Do not #undef this macro here since the build system might define */
+/* it for certain configurations only. */
+/* */
+/* #define FT_CONFIG_OPTION_SYSTEM_ZLIB */
+
+/*************************************************************************/
+/* */
+/* Bzip2-compressed file support. */
+/* */
+/* FreeType now handles font files that have been compressed with the */
+/* `bzip2' program. This is mostly used to parse many of the PCF */
+/* files that come with XFree86. The implementation uses `libbz2' to */
+/* partially uncompress the file on the fly (see src/bzip2/ftbzip2.c). */
+/* Contrary to gzip, bzip2 currently is not included and needs to use   */
+/* the system-available bzip2 implementation.                           */
+/* */
+/* Define this macro if you want to enable this `feature'. */
+/* */
+/* #define FT_CONFIG_OPTION_USE_BZIP2 */
+
+/*************************************************************************/
+/* */
+/* Define to disable the use of file stream functions and types, FILE, */
+/* fopen() etc. Enables the use of smaller system libraries on embedded */
+/* systems that have multiple system libraries, some with or without */
+/* file stream support, in the cases where file stream support is not */
+/* necessary such as memory loading of font files. */
+/* */
+/* #define FT_CONFIG_OPTION_DISABLE_STREAM_SUPPORT */
+
+/*************************************************************************/
+/* */
+/* PNG bitmap support. */
+/* */
+/* FreeType now handles loading color bitmap glyphs in the PNG format. */
+/* This requires help from the external libpng library. Uncompressed */
+/* color bitmaps do not need any external libraries and will be */
+/* supported regardless of this configuration. */
+/* */
+/* Define this macro if you want to enable this `feature'. */
+/* */
+#define FT_CONFIG_OPTION_USE_PNG
+
+/*************************************************************************/
+/* */
+/* HarfBuzz support. */
+/* */
+/* FreeType uses the HarfBuzz library to improve auto-hinting of */
+/* OpenType fonts. If available, many glyphs not directly addressable */
+/* by a font's character map will be hinted also. */
+/* */
+/* Define this macro if you want to enable this `feature'. */
+/* */
+/* #define FT_CONFIG_OPTION_USE_HARFBUZZ */
+
+/*************************************************************************/
+/* */
+/* DLL export compilation */
+/* */
+/* When compiling FreeType as a DLL, some systems/compilers need a */
+/* special keyword in front OR after the return type of function */
+/* declarations. */
+/* */
+/* Two macros are used within the FreeType source code to define */
+/* exported library functions: FT_EXPORT and FT_EXPORT_DEF. */
+/* */
+/* FT_EXPORT( return_type ) */
+/* */
+/* is used in a function declaration, as in */
+/* */
+/* FT_EXPORT( FT_Error ) */
+/* FT_Init_FreeType( FT_Library* alibrary ); */
+/* */
+/* */
+/* FT_EXPORT_DEF( return_type ) */
+/* */
+/* is used in a function definition, as in */
+/* */
+/* FT_EXPORT_DEF( FT_Error ) */
+/* FT_Init_FreeType( FT_Library* alibrary ) */
+/* { */
+/* ... some code ... */
+/* return FT_Err_Ok; */
+/* } */
+/* */
+/* You can provide your own implementation of FT_EXPORT and */
+/* FT_EXPORT_DEF here if you want. If you leave them undefined, they */
+/* will be later automatically defined as `extern return_type' to */
+/* allow normal compilation. */
+/* */
+/* Do not #undef these macros here since the build system might define */
+/* them for certain configurations only. */
+/* */
+/* #define FT_EXPORT(x) extern x */
+/* #define FT_EXPORT_DEF(x) x */
+
+/*************************************************************************/
+/* */
+/* Glyph Postscript Names handling */
+/* */
+/* By default, FreeType 2 is compiled with the `psnames' module. This */
+/* module is in charge of converting a glyph name string into a */
+/* Unicode value, or return a Macintosh standard glyph name for the */
+/* use with the TrueType `post' table. */
+/* */
+/* Undefine this macro if you do not want `psnames' compiled in your */
+/* build of FreeType. This has the following effects: */
+/* */
+/* - The TrueType driver will provide its own set of glyph names, */
+/* if you build it to support postscript names in the TrueType */
+/* `post' table. */
+/* */
+/* - The Type 1 driver will not be able to synthesize a Unicode */
+/* charmap out of the glyphs found in the fonts. */
+/* */
+/* You would normally undefine this configuration macro when building */
+/* a version of FreeType that doesn't contain a Type 1 or CFF driver. */
+/* */
+#define FT_CONFIG_OPTION_POSTSCRIPT_NAMES
+
+/*************************************************************************/
+/* */
+/* Postscript Names to Unicode Values support */
+/* */
+/* By default, FreeType 2 is built with the `PSNames' module compiled */
+/* in. Among other things, the module is used to convert a glyph name */
+/* into a Unicode value. This is especially useful in order to */
+/* synthesize on the fly a Unicode charmap from the CFF/Type 1 driver */
+/* through a big table named the `Adobe Glyph List' (AGL). */
+/* */
+/* Undefine this macro if you do not want the Adobe Glyph List */
+/* compiled in your `PSNames' module. The Type 1 driver will not be */
+/* able to synthesize a Unicode charmap out of the glyphs found in the */
+/* fonts. */
+/* */
+#define FT_CONFIG_OPTION_ADOBE_GLYPH_LIST
+
+/*************************************************************************/
+/* */
+/* Support for Mac fonts */
+/* */
+/* Define this macro if you want support for outline fonts in Mac */
+/* format (mac dfont, mac resource, macbinary containing a mac */
+/* resource) on non-Mac platforms. */
+/* */
+/* Note that the `FOND' resource isn't checked. */
+/* */
+#define FT_CONFIG_OPTION_MAC_FONTS
+
+/*************************************************************************/
+/* */
+/* Guessing methods to access embedded resource forks */
+/* */
+/* Enable extra Mac fonts support on non-Mac platforms (e.g. */
+/* GNU/Linux). */
+/* */
+/* Resource forks which include fonts data are stored sometimes in      */
+/* locations which users or developers don't expect.  In some cases,    */
+/* resource forks start with some offset from the head of a file.  In   */
+/* other cases, the actual resource fork is stored in a file different  */
+/* from what the user specifies.  If this option is activated,          */
+/* FreeType tries to guess whether such offsets or different file       */
+/* names must be used.                                                  */
+/* */
+/* Note that normal, direct access of resource forks is controlled via */
+/* the FT_CONFIG_OPTION_MAC_FONTS option. */
+/* */
+#ifdef FT_CONFIG_OPTION_MAC_FONTS
+#define FT_CONFIG_OPTION_GUESSING_EMBEDDED_RFORK
+#endif
+
+/*************************************************************************/
+/* */
+/* Allow the use of FT_Incremental_Interface to load typefaces that */
+/* contain no glyph data, but supply it via a callback function. */
+/* This is required by clients supporting document formats which */
+/* supply font data incrementally as the document is parsed, such */
+/* as the Ghostscript interpreter for the PostScript language. */
+/* */
+/* #define FT_CONFIG_OPTION_INCREMENTAL */
+
+/*************************************************************************/
+/* */
+/* The size in bytes of the render pool used by the scan-line converter */
+/* to do all of its work. */
+/* */
+#define FT_RENDER_POOL_SIZE 16384L
+
+/*************************************************************************/
+/* */
+/* FT_MAX_MODULES */
+/* */
+/* The maximum number of modules that can be registered in a single */
+/* FreeType library object. 32 is the default. */
+/* */
+#define FT_MAX_MODULES 32
+
+/*************************************************************************/
+/* */
+/* Debug level */
+/* */
+/* FreeType can be compiled in debug or trace mode. In debug mode, */
+/* errors are reported through the `ftdebug' component. In trace */
+/* mode, additional messages are sent to the standard output during */
+/* execution. */
+/* */
+/* Define FT_DEBUG_LEVEL_ERROR to build the library in debug mode. */
+/* Define FT_DEBUG_LEVEL_TRACE to build it in trace mode. */
+/* */
+/* Don't define any of these macros to compile in `release' mode! */
+/* */
+/* Do not #undef these macros here since the build system might define */
+/* them for certain configurations only. */
+/* */
+/* #define FT_DEBUG_LEVEL_ERROR */
+/* #define FT_DEBUG_LEVEL_TRACE */
+
+/*************************************************************************/
+/* */
+/* Autofitter debugging */
+/* */
+/* If FT_DEBUG_AUTOFIT is defined, FreeType provides some means to */
+/* control the autofitter behaviour for debugging purposes with global */
+/* boolean variables (consequently, you should *never* enable this */
+/* while compiling in `release' mode): */
+/* */
+/* _af_debug_disable_horz_hints */
+/* _af_debug_disable_vert_hints */
+/* _af_debug_disable_blue_hints */
+/* */
+/* Additionally, the following functions provide dumps of various */
+/* internal autofit structures to stdout (using `printf'): */
+/* */
+/* af_glyph_hints_dump_points */
+/* af_glyph_hints_dump_segments */
+/* af_glyph_hints_dump_edges */
+/* af_glyph_hints_get_num_segments */
+/* af_glyph_hints_get_segment_offset */
+/* */
+/* As an argument, they use another global variable: */
+/* */
+/* _af_debug_hints */
+/* */
+/* Please have a look at the `ftgrid' demo program to see how those */
+/* variables and macros should be used. */
+/* */
+/* Do not #undef these macros here since the build system might define */
+/* them for certain configurations only. */
+/* */
+/* #define FT_DEBUG_AUTOFIT */
+
+/*************************************************************************/
+/* */
+/* Memory Debugging */
+/* */
+/* FreeType now comes with an integrated memory debugger that is */
+/* capable of detecting simple errors like memory leaks or double */
+/* deletes. To compile it within your build of the library, you */
+/* should define FT_DEBUG_MEMORY here. */
+/* */
+/* Note that the memory debugger is only activated at runtime when the  */
+/* _environment_ variable `FT2_DEBUG_MEMORY' is defined also!           */
+/* */
+/* Do not #undef this macro here since the build system might define */
+/* it for certain configurations only. */
+/* */
+/* #define FT_DEBUG_MEMORY */
+
+/*************************************************************************/
+/* */
+/* Module errors */
+/* */
+/* If this macro is set (which is _not_ the default), the higher byte */
+/* of an error code gives the module in which the error has occurred, */
+/* while the lower byte is the real error code. */
+/* */
+/* Setting this macro makes sense for debugging purposes only, since */
+/* it would break source compatibility of certain programs that use */
+/* FreeType 2. */
+/* */
+/* More details can be found in the files ftmoderr.h and fterrors.h. */
+/* */
+#undef FT_CONFIG_OPTION_USE_MODULE_ERRORS
+
+/*************************************************************************/
+/* */
+/* Position Independent Code */
+/* */
+/* If this macro is set (which is _not_ the default), FreeType2 will */
+/* avoid creating constants that require address fixups. Instead the */
+/* constants will be moved into a struct and additional initialization  */
+/* code will be used. */
+/* */
+/* Setting this macro is needed for systems that prohibit address */
+/* fixups, such as BREW. */
+/* */
+/* #define FT_CONFIG_OPTION_PIC */
+
+/*************************************************************************/
+/*************************************************************************/
+/**** ****/
+/**** S F N T D R I V E R C O N F I G U R A T I O N ****/
+/**** ****/
+/*************************************************************************/
+/*************************************************************************/
+
+/*************************************************************************/
+/* */
+/* Define TT_CONFIG_OPTION_EMBEDDED_BITMAPS if you want to support */
+/* embedded bitmaps in all formats using the SFNT module (namely */
+/* TrueType & OpenType). */
+/* */
+#define TT_CONFIG_OPTION_EMBEDDED_BITMAPS
+
+/*************************************************************************/
+/* */
+/* Define TT_CONFIG_OPTION_POSTSCRIPT_NAMES if you want to be able to */
+/* load and enumerate the glyph Postscript names in a TrueType or */
+/* OpenType file. */
+/* */
+/* Note that when you do not compile the `PSNames' module by undefining */
+/* the above FT_CONFIG_OPTION_POSTSCRIPT_NAMES, the `sfnt' module will */
+/* contain additional code used to read the PS Names table from a font. */
+/* */
+/* (By default, the module uses `PSNames' to extract glyph names.) */
+/* */
+#define TT_CONFIG_OPTION_POSTSCRIPT_NAMES
+
+/*************************************************************************/
+/* */
+/* Define TT_CONFIG_OPTION_SFNT_NAMES if your applications need to */
+/* access the internal name table in a SFNT-based format like TrueType */
+/* or OpenType. The name table contains various strings used to */
+/* describe the font, like family name, copyright, version, etc. It */
+/* does not contain any glyph name though. */
+/* */
+/* Accessing SFNT names is done through the functions declared in */
+/* `ftsnames.h'. */
+/* */
+#define TT_CONFIG_OPTION_SFNT_NAMES
+
+/*************************************************************************/
+/* */
+/* TrueType CMap support */
+/* */
+/* Here you can fine-tune which TrueType CMap table format shall be */
+/* supported. */
+#define TT_CONFIG_CMAP_FORMAT_0
+#define TT_CONFIG_CMAP_FORMAT_2
+#define TT_CONFIG_CMAP_FORMAT_4
+#define TT_CONFIG_CMAP_FORMAT_6
+#define TT_CONFIG_CMAP_FORMAT_8
+#define TT_CONFIG_CMAP_FORMAT_10
+#define TT_CONFIG_CMAP_FORMAT_12
+#define TT_CONFIG_CMAP_FORMAT_13
+#define TT_CONFIG_CMAP_FORMAT_14
+
+/*************************************************************************/
+/*************************************************************************/
+/**** ****/
+/**** T R U E T Y P E D R I V E R C O N F I G U R A T I O N ****/
+/**** ****/
+/*************************************************************************/
+/*************************************************************************/
+
+/*************************************************************************/
+/* */
+/* Define TT_CONFIG_OPTION_BYTECODE_INTERPRETER if you want to compile */
+/* a bytecode interpreter in the TrueType driver. */
+/* */
+/* By undefining this, you will only compile the code necessary to load */
+/* TrueType glyphs without hinting. */
+/* */
+/* Do not #undef this macro here, since the build system might */
+/* define it for certain configurations only. */
+/* */
+#define TT_CONFIG_OPTION_BYTECODE_INTERPRETER
+
+/*************************************************************************/
+/* */
+/* Define TT_CONFIG_OPTION_SUBPIXEL_HINTING if you want to compile */
+/* EXPERIMENTAL subpixel hinting support into the TrueType driver. This */
+/* replaces the native TrueType hinting mechanism when anything but */
+/* FT_RENDER_MODE_MONO is requested. */
+/* */
+/* Enabling this causes the TrueType driver to ignore instructions under */
+/* certain conditions. This is done in accordance with the guide here, */
+/* with some minor differences: */
+/* */
+/* http://www.microsoft.com/typography/cleartype/truetypecleartype.aspx */
+/* */
+/* By undefining this, you only compile the code necessary to hint */
+/* TrueType glyphs with native TT hinting. */
+/* */
+/* This option requires TT_CONFIG_OPTION_BYTECODE_INTERPRETER to be */
+/* defined. */
+/* */
+/* #define TT_CONFIG_OPTION_SUBPIXEL_HINTING */
+
+/*************************************************************************/
+/* */
+/* If you define TT_CONFIG_OPTION_UNPATENTED_HINTING, a special version */
+/* of the TrueType bytecode interpreter is used that doesn't implement */
+/* any of the patented opcodes and algorithms. The patents related to */
+/* TrueType hinting have expired worldwide since May 2010; this option */
+/* is now deprecated. */
+/* */
+/* Note that the TT_CONFIG_OPTION_UNPATENTED_HINTING macro is *ignored* */
+/* if you define TT_CONFIG_OPTION_BYTECODE_INTERPRETER; in other words, */
+/* either define TT_CONFIG_OPTION_BYTECODE_INTERPRETER or */
+/* TT_CONFIG_OPTION_UNPATENTED_HINTING but not both at the same time. */
+/* */
+/* This macro is only useful for a small number of font files (mostly */
+/* for Asian scripts) that require bytecode interpretation to properly */
+/* load glyphs. For all other fonts, this produces unpleasant results, */
+/* thus the unpatented interpreter is never used to load glyphs from */
+/* TrueType fonts unless one of the following two options is used. */
+/* */
+/* - The unpatented interpreter is explicitly activated by the user */
+/* through the FT_PARAM_TAG_UNPATENTED_HINTING parameter tag */
+/* when opening the FT_Face. */
+/* */
+/* - FreeType detects that the FT_Face corresponds to one of the */
+/* `trick' fonts (e.g., `Mingliu') it knows about. The font engine */
+/* contains a hard-coded list of font names and other matching */
+/* parameters (see function `tt_face_init' in file */
+/* `src/truetype/ttobjs.c'). */
+/* */
+/* Here a sample code snippet for using FT_PARAM_TAG_UNPATENTED_HINTING. */
+/* */
+/* { */
+/* FT_Parameter parameter; */
+/* FT_Open_Args open_args; */
+/* */
+/* */
+/* parameter.tag = FT_PARAM_TAG_UNPATENTED_HINTING; */
+/* */
+/* open_args.flags = FT_OPEN_PATHNAME | FT_OPEN_PARAMS; */
+/* open_args.pathname = my_font_pathname; */
+/* open_args.num_params = 1; */
+/*     open_args.params     = &parameter;                               */
+/* */
+/* error = FT_Open_Face( library, &open_args, index, &face ); */
+/* ... */
+/* } */
+/* */
+/* #define TT_CONFIG_OPTION_UNPATENTED_HINTING */
+
+/*************************************************************************/
+/* */
+/* Define TT_CONFIG_OPTION_COMPONENT_OFFSET_SCALED to compile the */
+/* TrueType glyph loader to use Apple's definition of how to handle */
+/* component offsets in composite glyphs. */
+/* */
+/* Apple and MS disagree on the default behavior of component offsets */
+/* in composites. Apple says that they should be scaled by the scaling */
+/* factors in the transformation matrix (roughly, it's more complex) */
+/* while MS says they should not. OpenType defines two bits in the */
+/* composite flags array which can be used to disambiguate, but old */
+/* fonts will not have them. */
+/* */
+/* http://www.microsoft.com/typography/otspec/glyf.htm */
+/* https://developer.apple.com/fonts/TrueType-Reference-Manual/RM06/Chap6glyf.html
+ */
+/* */
+#undef TT_CONFIG_OPTION_COMPONENT_OFFSET_SCALED
+
+/*************************************************************************/
+/* */
+/* Define TT_CONFIG_OPTION_GX_VAR_SUPPORT if you want to include */
+/* support for Apple's distortable font technology (fvar, gvar, cvar, */
+/* and avar tables). This has many similarities to Type 1 Multiple */
+/* Masters support. */
+/* */
+#define TT_CONFIG_OPTION_GX_VAR_SUPPORT
+
+/*************************************************************************/
+/* */
+/* Define TT_CONFIG_OPTION_BDF if you want to include support for */
+/* an embedded `BDF ' table within SFNT-based bitmap formats. */
+/* */
+#define TT_CONFIG_OPTION_BDF
+
+/*************************************************************************/
+/* */
+/* Option TT_CONFIG_OPTION_MAX_RUNNABLE_OPCODES controls the maximum */
+/* number of bytecode instructions executed for a single run of the */
+/* bytecode interpreter, needed to prevent infinite loops. You don't */
+/* want to change this except for very special situations (e.g., making */
+/* a library fuzzer spend less time to handle broken fonts). */
+/* */
+/* It is not expected that this value is ever modified by a configuring */
+/* script; instead, it gets surrounded with #ifndef ... #endif so that */
+/* the value can be set as a preprocessor option on the compiler's */
+/* command line. */
+/* */
+#ifndef TT_CONFIG_OPTION_MAX_RUNNABLE_OPCODES
+#define TT_CONFIG_OPTION_MAX_RUNNABLE_OPCODES 1000000L
+#endif
+
+/*************************************************************************/
+/*************************************************************************/
+/**** ****/
+/**** T Y P E 1 D R I V E R C O N F I G U R A T I O N ****/
+/**** ****/
+/*************************************************************************/
+/*************************************************************************/
+
+/*************************************************************************/
+/* */
+/* T1_MAX_DICT_DEPTH is the maximum depth of nest dictionaries and */
+/* arrays in the Type 1 stream (see t1load.c). A minimum of 4 is */
+/* required. */
+/* */
+#define T1_MAX_DICT_DEPTH 5
+
+/*************************************************************************/
+/* */
+/* T1_MAX_SUBRS_CALLS details the maximum number of nested sub-routine */
+/* calls during glyph loading. */
+/* */
+#define T1_MAX_SUBRS_CALLS 16
+
+/*************************************************************************/
+/* */
+/* T1_MAX_CHARSTRING_OPERANDS is the charstring stack's capacity. A */
+/* minimum of 16 is required. */
+/* */
+/* The Chinese font MingTiEG-Medium (CNS 11643 character set) needs 256. */
+/* */
+#define T1_MAX_CHARSTRINGS_OPERANDS 256
+
+/*************************************************************************/
+/* */
+/* Define this configuration macro if you want to prevent the */
+/* compilation of `t1afm', which is in charge of reading Type 1 AFM */
+/* files into an existing face. Note that if set, the T1 driver will be */
+/* unable to produce kerning distances. */
+/* */
+#undef T1_CONFIG_OPTION_NO_AFM
+
+/*************************************************************************/
+/* */
+/* Define this configuration macro if you want to prevent the */
+/* compilation of the Multiple Masters font support in the Type 1 */
+/* driver. */
+/* */
+#undef T1_CONFIG_OPTION_NO_MM_SUPPORT
+
+/*************************************************************************/
+/*************************************************************************/
+/**** ****/
+/**** C F F D R I V E R C O N F I G U R A T I O N ****/
+/**** ****/
+/*************************************************************************/
+/*************************************************************************/
+
+/*************************************************************************/
+/* */
+/* Using CFF_CONFIG_OPTION_DARKENING_PARAMETER_{X,Y}{1,2,3,4} it is */
+/* possible to set up the default values of the four control points that */
+/* define the stem darkening behaviour of the (new) CFF engine. For */
+/* more details please read the documentation of the */
+/* `darkening-parameters' property of the cff driver module (file */
+/* `ftcffdrv.h'), which allows the control at run-time. */
+/* */
+/* Do *not* undefine these macros! */
+/* */
+#define CFF_CONFIG_OPTION_DARKENING_PARAMETER_X1 500
+#define CFF_CONFIG_OPTION_DARKENING_PARAMETER_Y1 400
+
+#define CFF_CONFIG_OPTION_DARKENING_PARAMETER_X2 1000
+#define CFF_CONFIG_OPTION_DARKENING_PARAMETER_Y2 275
+
+#define CFF_CONFIG_OPTION_DARKENING_PARAMETER_X3 1667
+#define CFF_CONFIG_OPTION_DARKENING_PARAMETER_Y3 275
+
+#define CFF_CONFIG_OPTION_DARKENING_PARAMETER_X4 2333
+#define CFF_CONFIG_OPTION_DARKENING_PARAMETER_Y4 0
+
+/*************************************************************************/
+/* */
+/* CFF_CONFIG_OPTION_OLD_ENGINE controls whether the pre-Adobe CFF */
+/* engine gets compiled into FreeType. If defined, it is possible to */
+/* switch between the two engines using the `hinting-engine' property of */
+/* the cff driver module. */
+/* */
+/* #define CFF_CONFIG_OPTION_OLD_ENGINE */
+
+/*************************************************************************/
+/*************************************************************************/
+/**** ****/
+/**** A U T O F I T M O D U L E C O N F I G U R A T I O N ****/
+/**** ****/
+/*************************************************************************/
+/*************************************************************************/
+
+/*************************************************************************/
+/* */
+/* Compile autofit module with CJK (Chinese, Japanese, Korean) script */
+/* support. */
+/* */
+#define AF_CONFIG_OPTION_CJK
+
+/*************************************************************************/
+/* */
+/* Compile autofit module with Indic script support. */
+/* */
+#define AF_CONFIG_OPTION_INDIC
+
+/*************************************************************************/
+/* */
+/* Compile autofit module with warp hinting. The idea of the warping */
+/* code is to slightly scale and shift a glyph within a single dimension */
+/* so that as much of its segments are aligned (more or less) on the */
+/* grid. To find out the optimal scaling and shifting value, various */
+/* parameter combinations are tried and scored. */
+/* */
+/* This experimental option is active only if the rendering mode is */
+/* FT_RENDER_MODE_LIGHT; you can switch warping on and off with the */
+/* `warping' property of the auto-hinter (see file `ftautoh.h' for more */
+/* information; by default it is switched off). */
+/* */
+/*#define AF_CONFIG_OPTION_USE_WARPER*/
+
+/* */
+
+/*
+ * This macro is obsolete. Support has been removed in FreeType
+ * version 2.5.
+ */
+/* #define FT_CONFIG_OPTION_OLD_INTERNALS */
+
+/*
+ * This macro is defined if either unpatented or native TrueType
+ * hinting is requested by the definitions above.
+ */
+#ifdef TT_CONFIG_OPTION_BYTECODE_INTERPRETER
+#define TT_USE_BYTECODE_INTERPRETER
+#undef TT_CONFIG_OPTION_UNPATENTED_HINTING
+#elif defined TT_CONFIG_OPTION_UNPATENTED_HINTING
+#define TT_USE_BYTECODE_INTERPRETER
+#endif
+
+/*
+ * Check CFF darkening parameters. The checks are the same as in function
+ * `cff_property_set' in file `cffdrivr.c'.
+ */
+#if CFF_CONFIG_OPTION_DARKENING_PARAMETER_X1 < 0 || \
+ CFF_CONFIG_OPTION_DARKENING_PARAMETER_X2 < 0 || \
+ CFF_CONFIG_OPTION_DARKENING_PARAMETER_X3 < 0 || \
+ CFF_CONFIG_OPTION_DARKENING_PARAMETER_X4 < 0 || \
+ \
+ CFF_CONFIG_OPTION_DARKENING_PARAMETER_Y1 < 0 || \
+ CFF_CONFIG_OPTION_DARKENING_PARAMETER_Y2 < 0 || \
+ CFF_CONFIG_OPTION_DARKENING_PARAMETER_Y3 < 0 || \
+ CFF_CONFIG_OPTION_DARKENING_PARAMETER_Y4 < 0 || \
+ \
+ CFF_CONFIG_OPTION_DARKENING_PARAMETER_X1 > \
+ CFF_CONFIG_OPTION_DARKENING_PARAMETER_X2 || \
+ CFF_CONFIG_OPTION_DARKENING_PARAMETER_X2 > \
+ CFF_CONFIG_OPTION_DARKENING_PARAMETER_X3 || \
+ CFF_CONFIG_OPTION_DARKENING_PARAMETER_X3 > \
+ CFF_CONFIG_OPTION_DARKENING_PARAMETER_X4 || \
+ \
+ CFF_CONFIG_OPTION_DARKENING_PARAMETER_Y1 > 500 || \
+ CFF_CONFIG_OPTION_DARKENING_PARAMETER_Y2 > 500 || \
+ CFF_CONFIG_OPTION_DARKENING_PARAMETER_Y3 > 500 || \
+ CFF_CONFIG_OPTION_DARKENING_PARAMETER_Y4 > 500
+#error "Invalid CFF darkening parameters!"
+#endif
+
+FT_END_HEADER
+
+#endif /* COBALT___FTOPTION_H__ */
+
+/* END */
diff --git a/src/third_party/mozjs/js/src/jit/mips/Architecture-mips.cpp b/src/third_party/mozjs/js/src/jit/mips/Architecture-mips.cpp
new file mode 100644
index 0000000..d0d61ff
--- /dev/null
+++ b/src/third_party/mozjs/js/src/jit/mips/Architecture-mips.cpp
@@ -0,0 +1,76 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/mips/Architecture-mips.h"
+
+#include <elf.h>
+
+#include <fcntl.h>
+#include <unistd.h>
+
+#define HWCAP_MIPS (1 << 31)
+#define HWCAP_FPU (1 << 0)
+
+namespace js {
+namespace jit {
+
+uint32_t GetMIPSFlags()
+{
+ static bool isSet = false;
+ static uint32_t flags = 0;
+ if (isSet)
+ return flags;
+#if WTF_OS_LINUX
+ FILE *fp = fopen("/proc/cpuinfo", "r");
+ if (!fp)
+ return false;
+
+ char buf[1024];
+ memset(buf, 0, sizeof(buf));
+ fread(buf, sizeof(char), sizeof(buf)-1, fp);
+ fclose(fp);
+ if (strstr(buf, "FPU"))
+ flags |= HWCAP_FPU;
+
+ isSet = true;
+ return flags;
+#endif
+
+ return false;
+}
+
+bool hasFPU()
+{
+ return js::jit::GetMIPSFlags() & HWCAP_FPU;
+}
+
+Registers::Code
+Registers::FromName(const char *name)
+{
+ for (size_t i = 0; i < Total; i++) {
+ if (strcmp(GetName(i), name) == 0)
+ return Code(i);
+ }
+
+ return Invalid;
+}
+
+FloatRegisters::Code
+FloatRegisters::FromName(const char *name)
+{
+ for (size_t i = 0; i < Total; i++) {
+ if (strcmp(GetName(i), name) == 0)
+ return Code(i);
+ }
+
+ return Invalid;
+}
+
+
+
+} // namespace jit
+} // namespace js
+
diff --git a/src/third_party/mozjs/js/src/jit/mips/Architecture-mips.h b/src/third_party/mozjs/js/src/jit/mips/Architecture-mips.h
new file mode 100644
index 0000000..0bc7346
--- /dev/null
+++ b/src/third_party/mozjs/js/src/jit/mips/Architecture-mips.h
@@ -0,0 +1,324 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_mips_Architecture_mips_h
+#define jit_mips_Architecture_mips_h
+
+#include <limits.h>
+#include <stdint.h>
+
+#include "js/Utility.h"
+
+// gcc appears to use _mips_hard_float to denote
+// that the target is a hard-float target.
+#ifdef _mips_hard_float
+#define JS_CODEGEN_MIPS_HARDFP
+#endif
+namespace js {
+namespace jit {
+
+// Shadow stack space is not required on MIPS.
+static const uint32_t ShadowStackSpace = 0;
+
+// These offsets are specific to nunboxing, and capture offsets into the
+// components of a js::Value.
+// Size of MIPS32 general purpose registers is 32 bits.
+static const int32_t NUNBOX32_TYPE_OFFSET = 4;
+static const int32_t NUNBOX32_PAYLOAD_OFFSET = 0;
+
+// Size of each bailout table entry.
+// For MIPS this is 2 instructions relative call.
+static const uint32_t BAILOUT_TABLE_ENTRY_SIZE = 2 * sizeof(void *);
+
+class Registers
+{
+ public:
+ enum RegisterID {
+ r0 = 0,
+ r1,
+ r2,
+ r3,
+ r4,
+ r5,
+ r6,
+ r7,
+ r8,
+ r9,
+ r10,
+ r11,
+ r12,
+ r13,
+ r14,
+ r15,
+ r16,
+ r17,
+ r18,
+ r19,
+ r20,
+ r21,
+ r22,
+ r23,
+ r24,
+ r25,
+ r26,
+ r27,
+ r28,
+ r29,
+ r30,
+ r31,
+ zero = r0,
+ at = r1,
+ v0 = r2,
+ v1 = r3,
+ a0 = r4,
+ a1 = r5,
+ a2 = r6,
+ a3 = r7,
+ t0 = r8,
+ t1 = r9,
+ t2 = r10,
+ t3 = r11,
+ t4 = r12,
+ t5 = r13,
+ t6 = r14,
+ t7 = r15,
+ s0 = r16,
+ s1 = r17,
+ s2 = r18,
+ s3 = r19,
+ s4 = r20,
+ s5 = r21,
+ s6 = r22,
+ s7 = r23,
+ t8 = r24,
+ t9 = r25,
+ k0 = r26,
+ k1 = r27,
+ gp = r28,
+ sp = r29,
+ fp = r30,
+ ra = r31,
+ invalid_reg
+ };
+ typedef RegisterID Code;
+
+ static const char *GetName(Code code) {
+ static const char * const Names[] = { "zero", "at", "v0", "v1", "a0", "a1", "a2", "a3",
+ "t0", "t1", "t2", "t3", "t4", "t5", "t6", "t7",
+ "s0", "s1", "s2", "s3", "s4", "s5", "s6", "s7",
+ "t8", "t9", "k0", "k1", "gp", "sp", "fp", "ra"};
+ return Names[code];
+ }
+ static const char *GetName(uint32_t i) {
+ JS_ASSERT(i < Total);
+ return GetName(Code(i));
+ }
+
+ static Code FromName(const char *name);
+
+ static const Code StackPointer = sp;
+ static const Code Invalid = invalid_reg;
+
+ static const uint32_t Total = 32;
+ static const uint32_t Allocatable = 14;
+
+ static const uint32_t AllMask = 0xffffffff;
+ static const uint32_t ArgRegMask = (1 << a0) | (1 << a1) | (1 << a2) | (1 << a3);
+
+ static const uint32_t VolatileMask =
+ (1 << Registers::v0) |
+ (1 << Registers::v1) |
+ (1 << Registers::a0) |
+ (1 << Registers::a1) |
+ (1 << Registers::a2) |
+ (1 << Registers::a3) |
+ (1 << Registers::t0) |
+ (1 << Registers::t1) |
+ (1 << Registers::t2) |
+ (1 << Registers::t3) |
+ (1 << Registers::t4) |
+ (1 << Registers::t5) |
+ (1 << Registers::t6) |
+ (1 << Registers::t7);
+
+ static const uint32_t NonVolatileMask =
+ (1 << Registers::s0) |
+ (1 << Registers::s1) |
+ (1 << Registers::s2) |
+ (1 << Registers::s3) |
+ (1 << Registers::s4) |
+ (1 << Registers::s5) |
+ (1 << Registers::s6) |
+ (1 << Registers::s7);
+
+ static const uint32_t WrapperMask =
+ VolatileMask | // = arguments
+ (1 << Registers::t0) | // = outReg
+ (1 << Registers::t1); // = argBase
+
+ static const uint32_t NonAllocatableMask =
+ (1 << Registers::zero) |
+ (1 << Registers::at) | // at = scratch
+ (1 << Registers::t8) | // t8 = scratch
+ (1 << Registers::t9) | // t9 = scratch
+ (1 << Registers::k0) |
+ (1 << Registers::k1) |
+ (1 << Registers::gp) |
+ (1 << Registers::sp) |
+ (1 << Registers::fp) |
+ (1 << Registers::ra);
+
+ // Registers that can be allocated without being saved, generally.
+ static const uint32_t TempMask = VolatileMask & ~NonAllocatableMask;
+
+ // Registers returned from a JS -> JS call.
+ static const uint32_t JSCallMask =
+ (1 << Registers::v0) |
+ (1 << Registers::v1);
+
+ // Registers returned from a JS -> C call.
+ static const uint32_t CallMask =
+ (1 << Registers::v0) |
+ (1 << Registers::v1); // used for double-size returns
+
+ static const uint32_t AllocatableMask = AllMask & ~NonAllocatableMask;
+};
+
+// Smallest integer type that can hold a register bitmask.
+typedef uint32_t PackedRegisterMask;
+
+
+// MIPS32 can have two types of floating-point coprocessors:
+// - 32 bit floating-point coprocessor - In this case, there are 32 single
+// precision registers and pairs of even and odd float registers are used as
+// double precision registers. Example: f0 (double) is composed of
+// f0 and f1 (single).
+// - 64 bit floating-point coprocessor - In this case, there are 32 double
+// precision register which can also be used as single precision registers.
+
+// When using O32 ABI, floating-point coprocessor is 32 bit
+// When using N32 ABI, floating-point coprocessor is 64 bit.
+class FloatRegisters
+{
+ public:
+ enum FPRegisterID {
+ f0 = 0,
+ f1,
+ f2,
+ f3,
+ f4,
+ f5,
+ f6,
+ f7,
+ f8,
+ f9,
+ f10,
+ f11,
+ f12,
+ f13,
+ f14,
+ f15,
+ f16,
+ f17,
+ f18,
+ f19,
+ f20,
+ f21,
+ f22,
+ f23,
+ f24,
+ f25,
+ f26,
+ f27,
+ f28,
+ f29,
+ f30,
+ f31,
+ invalid_freg
+ };
+ typedef FPRegisterID Code;
+
+ static const char *GetName(Code code) {
+ static const char * const Names[] = { "f0", "f1", "f2", "f3", "f4", "f5", "f6", "f7",
+ "f8", "f9", "f10", "f11", "f12", "f13",
+ "f14", "f15", "f16", "f17", "f18", "f19",
+ "f20", "f21", "f22", "f23", "f24", "f25",
+ "f26", "f27", "f28", "f29", "f30", "f31"};
+ return Names[code];
+ }
+ static const char *GetName(uint32_t i) {
+ JS_ASSERT(i < Total);
+ return GetName(Code(i));
+ }
+
+ static Code FromName(const char *name);
+
+ static const Code Invalid = invalid_freg;
+
+ static const uint32_t Total = 32;
+ // :TODO: (Bug 972836) // Fix this once odd regs can be used as float32
+ // only. For now we don't allocate odd regs for O32 ABI.
+ static const uint32_t Allocatable = 14;
+
+ static const uint32_t AllMask = 0xffffffff;
+
+ static const uint32_t VolatileMask =
+ (1 << FloatRegisters::f0) |
+ (1 << FloatRegisters::f2) |
+ (1 << FloatRegisters::f4) |
+ (1 << FloatRegisters::f6) |
+ (1 << FloatRegisters::f8) |
+ (1 << FloatRegisters::f10) |
+ (1 << FloatRegisters::f12) |
+ (1 << FloatRegisters::f14) |
+ (1 << FloatRegisters::f16) |
+ (1 << FloatRegisters::f18);
+ static const uint32_t NonVolatileMask =
+ (1 << FloatRegisters::f20) |
+ (1 << FloatRegisters::f22) |
+ (1 << FloatRegisters::f24) |
+ (1 << FloatRegisters::f26) |
+ (1 << FloatRegisters::f28) |
+ (1 << FloatRegisters::f30);
+
+ static const uint32_t WrapperMask = VolatileMask;
+
+ // :TODO: (Bug 972836) // Fix this once odd regs can be used as float32
+ // only. For now we don't allocate odd regs for O32 ABI.
+ static const uint32_t NonAllocatableMask =
+ (1 << FloatRegisters::f1) |
+ (1 << FloatRegisters::f3) |
+ (1 << FloatRegisters::f5) |
+ (1 << FloatRegisters::f7) |
+ (1 << FloatRegisters::f9) |
+ (1 << FloatRegisters::f11) |
+ (1 << FloatRegisters::f13) |
+ (1 << FloatRegisters::f15) |
+ (1 << FloatRegisters::f17) |
+ (1 << FloatRegisters::f19) |
+ (1 << FloatRegisters::f21) |
+ (1 << FloatRegisters::f23) |
+ (1 << FloatRegisters::f25) |
+ (1 << FloatRegisters::f27) |
+ (1 << FloatRegisters::f29) |
+ (1 << FloatRegisters::f31) |
+ // f18 and f16 are MIPS scratch float registers.
+ (1 << FloatRegisters::f16) |
+ (1 << FloatRegisters::f18);
+
+ // Registers that can be allocated without being saved, generally.
+ static const uint32_t TempMask = VolatileMask & ~NonAllocatableMask;
+
+ static const uint32_t AllocatableMask = AllMask & ~NonAllocatableMask;
+};
+
+uint32_t GetMIPSFlags();
+bool hasFPU();
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_mips_Architecture_mips_h */
diff --git a/src/third_party/mozjs/js/src/jit/mips/Assembler-mips.cpp b/src/third_party/mozjs/js/src/jit/mips/Assembler-mips.cpp
new file mode 100644
index 0000000..57fe57b
--- /dev/null
+++ b/src/third_party/mozjs/js/src/jit/mips/Assembler-mips.cpp
@@ -0,0 +1,1529 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/mips/Assembler-mips.h"
+
+#include "mozilla/DebugOnly.h"
+#include "mozilla/MathAlgorithms.h"
+
+#include "jscompartment.h"
+#include "jsutil.h"
+
+#include "assembler/jit/ExecutableAllocator.h"
+#include "gc/Marking.h"
+#include "jit/JitCompartment.h"
+
+using mozilla::DebugOnly;
+
+using namespace js;
+using namespace js::jit;
+
+ABIArgGenerator::ABIArgGenerator()
+ : usedArgSlots_(0),
+ firstArgFloat(false),
+ current_()
+{}
+
+ABIArg
+ABIArgGenerator::next(MIRType type)
+{
+ MOZ_ASSUME_UNREACHABLE("NYI");
+ return ABIArg();
+}
+const Register ABIArgGenerator::NonArgReturnVolatileReg0 = t0;
+const Register ABIArgGenerator::NonArgReturnVolatileReg1 = t1;
+
+// Encode a standard register when it is being used as rd, the rs, and
+// an extra register(rt). These should never be called with an InvalidReg.
+uint32_t
+js::jit::RS(Register r)
+{
+ JS_ASSERT((r.code() & ~RegMask) == 0);
+ return r.code() << RSShift;
+}
+
+uint32_t
+js::jit::RT(Register r)
+{
+ JS_ASSERT((r.code() & ~RegMask) == 0);
+ return r.code() << RTShift;
+}
+
+uint32_t
+js::jit::RT(FloatRegister r)
+{
+ JS_ASSERT(r.code() < FloatRegisters::Total);
+ return r.code() << RTShift;
+}
+
+uint32_t
+js::jit::RD(Register r)
+{
+ JS_ASSERT((r.code() & ~RegMask) == 0);
+ return r.code() << RDShift;
+}
+
+uint32_t
+js::jit::RD(FloatRegister r)
+{
+ JS_ASSERT(r.code() < FloatRegisters::Total);
+ return r.code() << RDShift;
+}
+
+uint32_t
+js::jit::SA(uint32_t value)
+{
+ JS_ASSERT(value < 32);
+ return value << SAShift;
+}
+
+uint32_t
+js::jit::SA(FloatRegister r)
+{
+ JS_ASSERT(r.code() < FloatRegisters::Total);
+ return r.code() << SAShift;
+}
+
+Register
+js::jit::toRS(Instruction &i)
+{
+ return Register::FromCode((i.encode() & RSMask ) >> RSShift);
+}
+
+Register
+js::jit::toRT(Instruction &i)
+{
+ return Register::FromCode((i.encode() & RTMask ) >> RTShift);
+}
+
+Register
+js::jit::toRD(Instruction &i)
+{
+ return Register::FromCode((i.encode() & RDMask ) >> RDShift);
+}
+
+Register
+js::jit::toR(Instruction &i)
+{
+ return Register::FromCode(i.encode() & RegMask);
+}
+
+void
+InstImm::extractImm16(BOffImm16 *dest)
+{
+ *dest = BOffImm16(*this);
+}
+
+// Used to patch jumps created by MacroAssemblerMIPSCompat::jumpWithPatch.
+void
+jit::PatchJump(CodeLocationJump &jump_, CodeLocationLabel label)
+{
+ Instruction *inst1 = (Instruction *)jump_.raw();
+ Instruction *inst2 = inst1->next();
+
+ Assembler::updateLuiOriValue(inst1, inst2, (uint32_t)label.raw());
+
+ AutoFlushICache::flush(uintptr_t(inst1), 8);
+}
+
+void
+Assembler::finish()
+{
+ JS_ASSERT(!isFinished);
+ isFinished = true;
+}
+
+void
+Assembler::executableCopy(uint8_t *buffer)
+{
+ JS_ASSERT(isFinished);
+ m_buffer.executableCopy(buffer);
+
+ // Patch all long jumps during code copy.
+ for (size_t i = 0; i < longJumps_.length(); i++) {
+ Instruction *inst1 = (Instruction *) ((uint32_t)buffer + longJumps_[i]);
+
+ uint32_t value = extractLuiOriValue(inst1, inst1->next());
+ updateLuiOriValue(inst1, inst1->next(), (uint32_t)buffer + value);
+ }
+
+ AutoFlushICache::setRange(uintptr_t(buffer), m_buffer.size());
+}
+
+uint32_t
+Assembler::actualOffset(uint32_t off_) const
+{
+ return off_;
+}
+
+uint32_t
+Assembler::actualIndex(uint32_t idx_) const
+{
+ return idx_;
+}
+
+uint8_t *
+Assembler::PatchableJumpAddress(JitCode *code, uint32_t pe_)
+{
+ return code->raw() + pe_;
+}
+
+class RelocationIterator
+{
+ CompactBufferReader reader_;
+ // offset in bytes
+ uint32_t offset_;
+
+ public:
+ RelocationIterator(CompactBufferReader &reader)
+ : reader_(reader)
+ { }
+
+ bool read() {
+ if (!reader_.more())
+ return false;
+ offset_ = reader_.readUnsigned();
+ return true;
+ }
+
+ uint32_t offset() const {
+ return offset_;
+ }
+};
+
+uintptr_t
+Assembler::getPointer(uint8_t *instPtr)
+{
+ Instruction *inst = (Instruction*)instPtr;
+ return Assembler::extractLuiOriValue(inst, inst->next());
+}
+
+static JitCode *
+CodeFromJump(Instruction *jump)
+{
+ uint8_t *target = (uint8_t *)Assembler::extractLuiOriValue(jump, jump->next());
+ return JitCode::FromExecutable(target);
+}
+
+void
+Assembler::TraceJumpRelocations(JSTracer *trc, JitCode *code, CompactBufferReader &reader)
+{
+ RelocationIterator iter(reader);
+ while (iter.read()) {
+ JitCode *child = CodeFromJump((Instruction *)(code->raw() + iter.offset()));
+ MarkJitCodeUnbarriered(trc, &child, "rel32");
+ }
+}
+
+static void
+TraceDataRelocations(JSTracer *trc, uint8_t *buffer, CompactBufferReader &reader)
+{
+ while (reader.more()) {
+ size_t offset = reader.readUnsigned();
+ Instruction *inst = (Instruction*)(buffer + offset);
+ void *ptr = (void *)Assembler::extractLuiOriValue(inst, inst->next());
+
+ // No barrier needed since these are constants.
+ gc::MarkGCThingUnbarriered(trc, reinterpret_cast<void **>(&ptr), "ion-masm-ptr");
+ }
+}
+
+static void
+TraceDataRelocations(JSTracer *trc, MIPSBuffer *buffer, CompactBufferReader &reader)
+{
+ while (reader.more()) {
+ BufferOffset bo (reader.readUnsigned());
+ MIPSBuffer::AssemblerBufferInstIterator iter(bo, buffer);
+
+ void *ptr = (void *)Assembler::extractLuiOriValue(iter.cur(), iter.next());
+
+ // No barrier needed since these are constants.
+ gc::MarkGCThingUnbarriered(trc, reinterpret_cast<void **>(&ptr), "ion-masm-ptr");
+ }
+}
+
+void
+Assembler::TraceDataRelocations(JSTracer *trc, JitCode *code, CompactBufferReader &reader)
+{
+ ::TraceDataRelocations(trc, code->raw(), reader);
+}
+
+void
+Assembler::copyJumpRelocationTable(uint8_t *dest)
+{
+ if (jumpRelocations_.length())
+ memcpy(dest, jumpRelocations_.buffer(), jumpRelocations_.length());
+}
+
+void
+Assembler::copyDataRelocationTable(uint8_t *dest)
+{
+ if (dataRelocations_.length())
+ memcpy(dest, dataRelocations_.buffer(), dataRelocations_.length());
+}
+
+void
+Assembler::copyPreBarrierTable(uint8_t *dest)
+{
+ if (preBarriers_.length())
+ memcpy(dest, preBarriers_.buffer(), preBarriers_.length());
+}
+
+void
+Assembler::trace(JSTracer *trc)
+{
+ for (size_t i = 0; i < jumps_.length(); i++) {
+ RelativePatch &rp = jumps_[i];
+ if (rp.kind == Relocation::JITCODE) {
+ JitCode *code = JitCode::FromExecutable((uint8_t *)rp.target);
+ MarkJitCodeUnbarriered(trc, &code, "masmrel32");
+ JS_ASSERT(code == JitCode::FromExecutable((uint8_t *)rp.target));
+ }
+ }
+ if (dataRelocations_.length()) {
+ CompactBufferReader reader(dataRelocations_);
+ ::TraceDataRelocations(trc, &m_buffer, reader);
+ }
+}
+
+void
+Assembler::processCodeLabels(uint8_t *rawCode)
+{
+ for (size_t i = 0; i < codeLabels_.length(); i++) {
+ CodeLabel label = codeLabels_[i];
+ Bind(rawCode, label.dest(), rawCode + actualOffset(label.src()->offset()));
+ }
+}
+
+void
+Assembler::Bind(uint8_t *rawCode, AbsoluteLabel *label, const void *address)
+{
+ if (label->used()) {
+ int32_t src = label->offset();
+ do {
+ Instruction *inst = (Instruction *) (rawCode + src);
+ uint32_t next = Assembler::extractLuiOriValue(inst, inst->next());
+ Assembler::updateLuiOriValue(inst, inst->next(), (uint32_t)address);
+ src = next;
+ } while (src != AbsoluteLabel::INVALID_OFFSET);
+ }
+ label->bind();
+}
+
+Assembler::Condition
+Assembler::InvertCondition(Condition cond)
+{
+ switch (cond) {
+ case Equal:
+ return NotEqual;
+ case NotEqual:
+ return Equal;
+ case Zero:
+ return NonZero;
+ case NonZero:
+ return Zero;
+ case LessThan:
+ return GreaterThanOrEqual;
+ case LessThanOrEqual:
+ return GreaterThan;
+ case GreaterThan:
+ return LessThanOrEqual;
+ case GreaterThanOrEqual:
+ return LessThan;
+ case Above:
+ return BelowOrEqual;
+ case AboveOrEqual:
+ return Below;
+ case Below:
+ return AboveOrEqual;
+ case BelowOrEqual:
+ return Above;
+ case Signed:
+ return NotSigned;
+ case NotSigned:
+ return Signed;
+ default:
+ MOZ_ASSUME_UNREACHABLE("unexpected condition");
+ return Equal;
+ }
+}
+
+Assembler::DoubleCondition
+Assembler::InvertCondition(DoubleCondition cond)
+{
+ switch (cond) {
+ case DoubleOrdered:
+ return DoubleUnordered;
+ case DoubleEqual:
+ return DoubleNotEqualOrUnordered;
+ case DoubleNotEqual:
+ return DoubleEqualOrUnordered;
+ case DoubleGreaterThan:
+ return DoubleLessThanOrEqualOrUnordered;
+ case DoubleGreaterThanOrEqual:
+ return DoubleLessThanOrUnordered;
+ case DoubleLessThan:
+ return DoubleGreaterThanOrEqualOrUnordered;
+ case DoubleLessThanOrEqual:
+ return DoubleGreaterThanOrUnordered;
+ case DoubleUnordered:
+ return DoubleOrdered;
+ case DoubleEqualOrUnordered:
+ return DoubleNotEqual;
+ case DoubleNotEqualOrUnordered:
+ return DoubleEqual;
+ case DoubleGreaterThanOrUnordered:
+ return DoubleLessThanOrEqual;
+ case DoubleGreaterThanOrEqualOrUnordered:
+ return DoubleLessThan;
+ case DoubleLessThanOrUnordered:
+ return DoubleGreaterThanOrEqual;
+ case DoubleLessThanOrEqualOrUnordered:
+ return DoubleGreaterThan;
+ default:
+ MOZ_ASSUME_UNREACHABLE("unexpected condition");
+ return DoubleEqual;
+ }
+}
+
+BOffImm16::BOffImm16(InstImm inst)
+ : data(inst.encode() & Imm16Mask)
+{
+}
+
+bool
+Assembler::oom() const
+{
+ return m_buffer.oom() ||
+ !enoughMemory_ ||
+ jumpRelocations_.oom() ||
+ dataRelocations_.oom() ||
+ preBarriers_.oom();
+}
+
+bool
+Assembler::addCodeLabel(CodeLabel label)
+{
+ return codeLabels_.append(label);
+}
+
+// Size of the instruction stream, in bytes.
+size_t
+Assembler::size() const
+{
+ return m_buffer.size();
+}
+
+// Size of the relocation table, in bytes.
+size_t
+Assembler::jumpRelocationTableBytes() const
+{
+ return jumpRelocations_.length();
+}
+
+size_t
+Assembler::dataRelocationTableBytes() const
+{
+ return dataRelocations_.length();
+}
+
+size_t
+Assembler::preBarrierTableBytes() const
+{
+ return preBarriers_.length();
+}
+
+// Size of the data table, in bytes.
+size_t
+Assembler::bytesNeeded() const
+{
+ return size() +
+ jumpRelocationTableBytes() +
+ dataRelocationTableBytes() +
+ preBarrierTableBytes();
+}
+
+// write a blob of binary into the instruction stream
+BufferOffset
+Assembler::writeInst(uint32_t x, uint32_t *dest)
+{
+ if (dest == nullptr)
+ return m_buffer.putInt(x);
+
+ writeInstStatic(x, dest);
+ return BufferOffset();
+}
+
+void
+Assembler::writeInstStatic(uint32_t x, uint32_t *dest)
+{
+ JS_ASSERT(dest != nullptr);
+ *dest = x;
+}
+
+BufferOffset
+Assembler::align(int alignment)
+{
+ BufferOffset ret;
+ JS_ASSERT(m_buffer.isAligned(4));
+ if (alignment == 8) {
+ if (!m_buffer.isAligned(alignment)) {
+ BufferOffset tmp = as_nop();
+ if (!ret.assigned())
+ ret = tmp;
+ }
+ } else {
+ JS_ASSERT((alignment & (alignment - 1)) == 0);
+ while (size() & (alignment - 1)) {
+ BufferOffset tmp = as_nop();
+ if (!ret.assigned())
+ ret = tmp;
+ }
+ }
+ return ret;
+}
+
+BufferOffset
+Assembler::as_nop()
+{
+ return writeInst(op_special | ff_sll);
+}
+
+// Logical operations.
+BufferOffset
+Assembler::as_and(Register rd, Register rs, Register rt)
+{
+ return writeInst(InstReg(op_special, rs, rt, rd, ff_and).encode());
+}
+
+BufferOffset
+Assembler::as_or(Register rd, Register rs, Register rt)
+{
+ return writeInst(InstReg(op_special, rs, rt, rd, ff_or).encode());
+}
+
+BufferOffset
+Assembler::as_xor(Register rd, Register rs, Register rt)
+{
+ return writeInst(InstReg(op_special, rs, rt, rd, ff_xor).encode());
+}
+
+BufferOffset
+Assembler::as_nor(Register rd, Register rs, Register rt)
+{
+ return writeInst(InstReg(op_special, rs, rt, rd, ff_nor).encode());
+}
+
+BufferOffset
+Assembler::as_andi(Register rd, Register rs, int32_t j)
+{
+ JS_ASSERT(Imm16::isInUnsignedRange(j));
+ return writeInst(InstImm(op_andi, rs, rd, Imm16(j)).encode());
+}
+
+BufferOffset
+Assembler::as_ori(Register rd, Register rs, int32_t j)
+{
+ JS_ASSERT(Imm16::isInUnsignedRange(j));
+ return writeInst(InstImm(op_ori, rs, rd, Imm16(j)).encode());
+}
+
+BufferOffset
+Assembler::as_xori(Register rd, Register rs, int32_t j)
+{
+ JS_ASSERT(Imm16::isInUnsignedRange(j));
+ return writeInst(InstImm(op_xori, rs, rd, Imm16(j)).encode());
+}
+
+// Branch and jump instructions
+BufferOffset
+Assembler::as_bal(BOffImm16 off)
+{
+ BufferOffset bo = writeInst(InstImm(op_regimm, zero, rt_bgezal, off).encode());
+ return bo;
+}
+
+InstImm
+Assembler::getBranchCode(JumpOrCall jumpOrCall)
+{
+ if (jumpOrCall == BranchIsCall)
+ return InstImm(op_regimm, zero, rt_bgezal, BOffImm16(0));
+
+ return InstImm(op_beq, zero, zero, BOffImm16(0));
+}
+
+InstImm
+Assembler::getBranchCode(Register s, Register t, Condition c)
+{
+ JS_ASSERT(c == Assembler::Equal || c == Assembler::NotEqual);
+ return InstImm(c == Assembler::Equal ? op_beq : op_bne, s, t, BOffImm16(0));
+}
+
+InstImm
+Assembler::getBranchCode(Register s, Condition c)
+{
+ switch (c) {
+ case Assembler::Equal:
+ case Assembler::Zero:
+ case Assembler::BelowOrEqual:
+ return InstImm(op_beq, s, zero, BOffImm16(0));
+ case Assembler::NotEqual:
+ case Assembler::NonZero:
+ case Assembler::Above:
+ return InstImm(op_bne, s, zero, BOffImm16(0));
+ case Assembler::GreaterThan:
+ return InstImm(op_bgtz, s, zero, BOffImm16(0));
+ case Assembler::GreaterThanOrEqual:
+ case Assembler::NotSigned:
+ return InstImm(op_regimm, s, rt_bgez, BOffImm16(0));
+ case Assembler::LessThan:
+ case Assembler::Signed:
+ return InstImm(op_regimm, s, rt_bltz, BOffImm16(0));
+ case Assembler::LessThanOrEqual:
+ return InstImm(op_blez, s, zero, BOffImm16(0));
+ default:
+ MOZ_ASSUME_UNREACHABLE("Condition not supported.");
+ }
+}
+
+InstImm
+Assembler::getBranchCode(FloatTestKind testKind, FPConditionBit fcc)
+{
+ JS_ASSERT(!(fcc && FccMask));
+ uint32_t rtField = ((testKind == TestForTrue ? 1 : 0) | (fcc << FccShift)) << RTShift;
+
+ return InstImm(op_cop1, rs_bc1, rtField, BOffImm16(0));
+}
+
+BufferOffset
+Assembler::as_j(JOffImm26 off)
+{
+ BufferOffset bo = writeInst(InstJump(op_j, off).encode());
+ return bo;
+}
+BufferOffset
+Assembler::as_jal(JOffImm26 off)
+{
+ BufferOffset bo = writeInst(InstJump(op_jal, off).encode());
+ return bo;
+}
+
+BufferOffset
+Assembler::as_jr(Register rs)
+{
+ BufferOffset bo = writeInst(InstReg(op_special, rs, zero, zero, ff_jr).encode());
+ return bo;
+}
+BufferOffset
+Assembler::as_jalr(Register rs)
+{
+ BufferOffset bo = writeInst(InstReg(op_special, rs, zero, ra, ff_jalr).encode());
+ return bo;
+}
+
+
+// Arithmetic instructions
+BufferOffset
+Assembler::as_addu(Register rd, Register rs, Register rt)
+{
+ return writeInst(InstReg(op_special, rs, rt, rd, ff_addu).encode());
+}
+
+BufferOffset
+Assembler::as_addiu(Register rd, Register rs, int32_t j)
+{
+ JS_ASSERT(Imm16::isInSignedRange(j));
+ return writeInst(InstImm(op_addiu, rs, rd, Imm16(j)).encode());
+}
+
+BufferOffset
+Assembler::as_subu(Register rd, Register rs, Register rt)
+{
+ return writeInst(InstReg(op_special, rs, rt, rd, ff_subu).encode());
+}
+
+BufferOffset
+Assembler::as_mult(Register rs, Register rt)
+{
+ return writeInst(InstReg(op_special, rs, rt, ff_mult).encode());
+}
+
+BufferOffset
+Assembler::as_multu(Register rs, Register rt)
+{
+ return writeInst(InstReg(op_special, rs, rt, ff_multu).encode());
+}
+
+BufferOffset
+Assembler::as_div(Register rs, Register rt)
+{
+ return writeInst(InstReg(op_special, rs, rt, ff_div).encode());
+}
+
+BufferOffset
+Assembler::as_divu(Register rs, Register rt)
+{
+ return writeInst(InstReg(op_special, rs, rt, ff_divu).encode());
+}
+
+BufferOffset
+Assembler::as_mul(Register rd, Register rs, Register rt)
+{
+ return writeInst(InstReg(op_special2, rs, rt, rd, ff_mul).encode());
+}
+
+BufferOffset
+Assembler::as_lui(Register rd, int32_t j)
+{
+ JS_ASSERT(Imm16::isInUnsignedRange(j));
+ return writeInst(InstImm(op_lui, zero, rd, Imm16(j)).encode());
+}
+
+// Shift instructions
+BufferOffset
+Assembler::as_sll(Register rd, Register rt, uint16_t sa)
+{
+ JS_ASSERT(sa < 32);
+ return writeInst(InstReg(op_special, rs_zero, rt, rd, sa, ff_sll).encode());
+}
+
+BufferOffset
+Assembler::as_sllv(Register rd, Register rt, Register rs)
+{
+ return writeInst(InstReg(op_special, rs, rt, rd, ff_sllv).encode());
+}
+
+BufferOffset
+Assembler::as_srl(Register rd, Register rt, uint16_t sa)
+{
+ JS_ASSERT(sa < 32);
+ return writeInst(InstReg(op_special, rs_zero, rt, rd, sa, ff_srl).encode());
+}
+
+BufferOffset
+Assembler::as_srlv(Register rd, Register rt, Register rs)
+{
+ return writeInst(InstReg(op_special, rs, rt, rd, ff_srlv).encode());
+}
+
+BufferOffset
+Assembler::as_sra(Register rd, Register rt, uint16_t sa)
+{
+ JS_ASSERT(sa < 32);
+ return writeInst(InstReg(op_special, rs_zero, rt, rd, sa, ff_sra).encode());
+}
+
+BufferOffset
+Assembler::as_srav(Register rd, Register rt, Register rs)
+{
+ return writeInst(InstReg(op_special, rs, rt, rd, ff_srav).encode());
+}
+
+BufferOffset
+Assembler::as_rotr(Register rd, Register rt, uint16_t sa)
+{
+ JS_ASSERT(sa < 32);
+ return writeInst(InstReg(op_special, rs_one, rt, rd, sa, ff_srl).encode());
+}
+
+BufferOffset
+Assembler::as_rotrv(Register rd, Register rt, Register rs)
+{
+ return writeInst(InstReg(op_special, rs, rt, rd, 1, ff_srlv).encode());
+}
+
+// Load and store instructions
+BufferOffset
+Assembler::as_lb(Register rd, Register rs, int16_t off)
+{
+ return writeInst(InstImm(op_lb, rs, rd, Imm16(off)).encode());
+}
+
+BufferOffset
+Assembler::as_lbu(Register rd, Register rs, int16_t off)
+{
+ return writeInst(InstImm(op_lbu, rs, rd, Imm16(off)).encode());
+}
+
+BufferOffset
+Assembler::as_lh(Register rd, Register rs, int16_t off)
+{
+ return writeInst(InstImm(op_lh, rs, rd, Imm16(off)).encode());
+}
+
+BufferOffset
+Assembler::as_lhu(Register rd, Register rs, int16_t off)
+{
+ return writeInst(InstImm(op_lhu, rs, rd, Imm16(off)).encode());
+}
+
+BufferOffset
+Assembler::as_lw(Register rd, Register rs, int16_t off)
+{
+ return writeInst(InstImm(op_lw, rs, rd, Imm16(off)).encode());
+}
+
+BufferOffset
+Assembler::as_lwl(Register rd, Register rs, int16_t off)
+{
+ return writeInst(InstImm(op_lwl, rs, rd, Imm16(off)).encode());
+}
+
+BufferOffset
+Assembler::as_lwr(Register rd, Register rs, int16_t off)
+{
+ return writeInst(InstImm(op_lwr, rs, rd, Imm16(off)).encode());
+}
+
+BufferOffset
+Assembler::as_sb(Register rd, Register rs, int16_t off)
+{
+ return writeInst(InstImm(op_sb, rs, rd, Imm16(off)).encode());
+}
+
+BufferOffset
+Assembler::as_sh(Register rd, Register rs, int16_t off)
+{
+ return writeInst(InstImm(op_sh, rs, rd, Imm16(off)).encode());
+}
+
+BufferOffset
+Assembler::as_sw(Register rd, Register rs, int16_t off)
+{
+ return writeInst(InstImm(op_sw, rs, rd, Imm16(off)).encode());
+}
+
+BufferOffset
+Assembler::as_swl(Register rd, Register rs, int16_t off)
+{
+ return writeInst(InstImm(op_swl, rs, rd, Imm16(off)).encode());
+}
+
+BufferOffset
+Assembler::as_swr(Register rd, Register rs, int16_t off)
+{
+ return writeInst(InstImm(op_swr, rs, rd, Imm16(off)).encode());
+}
+
+// Move from HI/LO register.
+BufferOffset
+Assembler::as_mfhi(Register rd)
+{
+ return writeInst(InstReg(op_special, rd, ff_mfhi).encode());
+}
+
+BufferOffset
+Assembler::as_mflo(Register rd)
+{
+ return writeInst(InstReg(op_special, rd, ff_mflo).encode());
+}
+
+// Set on less than.
+BufferOffset
+Assembler::as_slt(Register rd, Register rs, Register rt)
+{
+ return writeInst(InstReg(op_special, rs, rt, rd, ff_slt).encode());
+}
+
+BufferOffset
+Assembler::as_sltu(Register rd, Register rs, Register rt)
+{
+ return writeInst(InstReg(op_special, rs, rt, rd, ff_sltu).encode());
+}
+
+BufferOffset
+Assembler::as_slti(Register rd, Register rs, int32_t j)
+{
+ JS_ASSERT(Imm16::isInSignedRange(j));
+ return writeInst(InstImm(op_slti, rs, rd, Imm16(j)).encode());
+}
+
+BufferOffset
+Assembler::as_sltiu(Register rd, Register rs, uint32_t j)
+{
+ JS_ASSERT(Imm16::isInUnsignedRange(j));
+ return writeInst(InstImm(op_sltiu, rs, rd, Imm16(j)).encode());
+}
+
+// Conditional move.
+BufferOffset
+Assembler::as_movz(Register rd, Register rs, Register rt)
+{
+ return writeInst(InstReg(op_special, rs, rt, rd, ff_movz).encode());
+}
+
+BufferOffset
+Assembler::as_movn(Register rd, Register rs, Register rt)
+{
+ return writeInst(InstReg(op_special, rs, rt, rd, ff_movn).encode());
+}
+
+BufferOffset
+Assembler::as_movt(Register rd, Register rs, uint16_t cc)
+{
+ Register rt;
+ rt = Register::FromCode((cc & 0x7) << 2 | 1);
+ return writeInst(InstReg(op_special, rs, rt, rd, ff_movci).encode());
+}
+
+BufferOffset
+Assembler::as_movf(Register rd, Register rs, uint16_t cc)
+{
+ Register rt;
+ rt = Register::FromCode((cc & 0x7) << 2 | 0);
+ return writeInst(InstReg(op_special, rs, rt, rd, ff_movci).encode());
+}
+
+// Bit twiddling.
+BufferOffset
+Assembler::as_clz(Register rd, Register rs, Register rt)
+{
+ return writeInst(InstReg(op_special2, rs, rt, rd, ff_clz).encode());
+}
+
+BufferOffset
+Assembler::as_ins(Register rt, Register rs, uint16_t pos, uint16_t size)
+{
+ JS_ASSERT(pos < 32 && size != 0 && size <= 32 && pos + size != 0 && pos + size >= 32);
+ Register rd;
+ rd = Register::FromCode(pos + size - 1);
+ return writeInst(InstReg(op_special3, rs, rt, rd, pos, ff_ins).encode());
+}
+
+BufferOffset
+Assembler::as_ext(Register rt, Register rs, uint16_t pos, uint16_t size)
+{
+ JS_ASSERT(pos < 32 && size != 0 && size <= 32 && pos + size != 0 && pos + size >= 32);
+ Register rd;
+ rd = Register::FromCode(size - 1);
+ return writeInst(InstReg(op_special3, rs, rt, rd, pos, ff_ext).encode());
+}
+
+// FP instructions
+BufferOffset
+Assembler::as_ld(FloatRegister fd, Register base, int32_t off)
+{
+ JS_ASSERT(Imm16::isInSignedRange(off));
+ return writeInst(InstImm(op_ldc1, base, fd, Imm16(off)).encode());
+}
+
+BufferOffset
+Assembler::as_sd(FloatRegister fd, Register base, int32_t off)
+{
+ JS_ASSERT(Imm16::isInSignedRange(off));
+ return writeInst(InstImm(op_sdc1, base, fd, Imm16(off)).encode());
+}
+
+BufferOffset
+Assembler::as_ls(FloatRegister fd, Register base, int32_t off)
+{
+ JS_ASSERT(Imm16::isInSignedRange(off));
+ return writeInst(InstImm(op_lwc1, base, fd, Imm16(off)).encode());
+}
+
+BufferOffset
+Assembler::as_ss(FloatRegister fd, Register base, int32_t off)
+{
+ JS_ASSERT(Imm16::isInSignedRange(off));
+ return writeInst(InstImm(op_swc1, base, fd, Imm16(off)).encode());
+}
+
+BufferOffset
+Assembler::as_movs(FloatRegister fd, FloatRegister fs)
+{
+ return writeInst(InstReg(op_cop1, rs_s, zero, fs, fd, ff_mov_fmt).encode());
+}
+
+BufferOffset
+Assembler::as_movd(FloatRegister fd, FloatRegister fs)
+{
+ return writeInst(InstReg(op_cop1, rs_d, zero, fs, fd, ff_mov_fmt).encode());
+}
+
+BufferOffset
+Assembler::as_mtc1(Register rt, FloatRegister fs)
+{
+ return writeInst(InstReg(op_cop1, rs_mtc1, rt, fs).encode());
+}
+
+BufferOffset
+Assembler::as_mfc1(Register rt, FloatRegister fs)
+{
+ return writeInst(InstReg(op_cop1, rs_mfc1, rt, fs).encode());
+}
+
+// FP convert instructions
+BufferOffset
+Assembler::as_ceilws(FloatRegister fd, FloatRegister fs)
+{
+ return writeInst(InstReg(op_cop1, rs_s, zero, fs, fd, ff_ceil_w_fmt).encode());
+}
+
+BufferOffset
+Assembler::as_floorws(FloatRegister fd, FloatRegister fs)
+{
+ return writeInst(InstReg(op_cop1, rs_s, zero, fs, fd, ff_floor_w_fmt).encode());
+}
+
+BufferOffset
+Assembler::as_roundws(FloatRegister fd, FloatRegister fs)
+{
+ return writeInst(InstReg(op_cop1, rs_s, zero, fs, fd, ff_round_w_fmt).encode());
+}
+
+BufferOffset
+Assembler::as_truncws(FloatRegister fd, FloatRegister fs)
+{
+ return writeInst(InstReg(op_cop1, rs_s, zero, fs, fd, ff_trunc_w_fmt).encode());
+}
+
+BufferOffset
+Assembler::as_ceilwd(FloatRegister fd, FloatRegister fs)
+{
+ return writeInst(InstReg(op_cop1, rs_d, zero, fs, fd, ff_ceil_w_fmt).encode());
+}
+
+BufferOffset
+Assembler::as_floorwd(FloatRegister fd, FloatRegister fs)
+{
+ return writeInst(InstReg(op_cop1, rs_d, zero, fs, fd, ff_floor_w_fmt).encode());
+}
+
+BufferOffset
+Assembler::as_roundwd(FloatRegister fd, FloatRegister fs)
+{
+ return writeInst(InstReg(op_cop1, rs_d, zero, fs, fd, ff_round_w_fmt).encode());
+}
+
+BufferOffset
+Assembler::as_truncwd(FloatRegister fd, FloatRegister fs)
+{
+ return writeInst(InstReg(op_cop1, rs_d, zero, fs, fd, ff_trunc_w_fmt).encode());
+}
+
+BufferOffset
+Assembler::as_cvtds(FloatRegister fd, FloatRegister fs)
+{
+ return writeInst(InstReg(op_cop1, rs_s, zero, fs, fd, ff_cvt_d_fmt).encode());
+}
+
+BufferOffset
+Assembler::as_cvtdw(FloatRegister fd, FloatRegister fs)
+{
+ return writeInst(InstReg(op_cop1, rs_w, zero, fs, fd, ff_cvt_d_fmt).encode());
+}
+
+BufferOffset
+Assembler::as_cvtsd(FloatRegister fd, FloatRegister fs)
+{
+ return writeInst(InstReg(op_cop1, rs_d, zero, fs, fd, ff_cvt_s_fmt).encode());
+}
+
+BufferOffset
+Assembler::as_cvtsw(FloatRegister fd, FloatRegister fs)
+{
+ return writeInst(InstReg(op_cop1, rs_w, zero, fs, fd, ff_cvt_s_fmt).encode());
+}
+
+BufferOffset
+Assembler::as_cvtwd(FloatRegister fd, FloatRegister fs)
+{
+ return writeInst(InstReg(op_cop1, rs_d, zero, fs, fd, ff_cvt_w_fmt).encode());
+}
+
+BufferOffset
+Assembler::as_cvtws(FloatRegister fd, FloatRegister fs)
+{
+ return writeInst(InstReg(op_cop1, rs_s, zero, fs, fd, ff_cvt_w_fmt).encode());
+}
+
+// FP arithmetic instructions
+BufferOffset
+Assembler::as_adds(FloatRegister fd, FloatRegister fs, FloatRegister ft)
+{
+ return writeInst(InstReg(op_cop1, rs_s, ft, fs, fd, ff_add_fmt).encode());
+}
+
+BufferOffset
+Assembler::as_addd(FloatRegister fd, FloatRegister fs, FloatRegister ft)
+{
+ return writeInst(InstReg(op_cop1, rs_d, ft, fs, fd, ff_add_fmt).encode());
+}
+
+BufferOffset
+Assembler::as_subs(FloatRegister fd, FloatRegister fs, FloatRegister ft)
+{
+ return writeInst(InstReg(op_cop1, rs_s, ft, fs, fd, ff_sub_fmt).encode());
+}
+
+BufferOffset
+Assembler::as_subd(FloatRegister fd, FloatRegister fs, FloatRegister ft)
+{
+ return writeInst(InstReg(op_cop1, rs_d, ft, fs, fd, ff_sub_fmt).encode());
+}
+
+BufferOffset
+Assembler::as_abss(FloatRegister fd, FloatRegister fs)
+{
+ return writeInst(InstReg(op_cop1, rs_s, zero, fs, fd, ff_abs_fmt).encode());
+}
+
+BufferOffset
+Assembler::as_absd(FloatRegister fd, FloatRegister fs)
+{
+ return writeInst(InstReg(op_cop1, rs_d, zero, fs, fd, ff_abs_fmt).encode());
+}
+
+BufferOffset
+Assembler::as_negd(FloatRegister fd, FloatRegister fs)
+{
+ return writeInst(InstReg(op_cop1, rs_d, zero, fs, fd, ff_neg_fmt).encode());
+}
+
+BufferOffset
+Assembler::as_muls(FloatRegister fd, FloatRegister fs, FloatRegister ft)
+{
+ return writeInst(InstReg(op_cop1, rs_s, ft, fs, fd, ff_mul_fmt).encode());
+}
+
+BufferOffset
+Assembler::as_muld(FloatRegister fd, FloatRegister fs, FloatRegister ft)
+{
+ return writeInst(InstReg(op_cop1, rs_d, ft, fs, fd, ff_mul_fmt).encode());
+}
+
+BufferOffset
+Assembler::as_divs(FloatRegister fd, FloatRegister fs, FloatRegister ft)
+{
+ return writeInst(InstReg(op_cop1, rs_s, ft, fs, fd, ff_div_fmt).encode());
+}
+
+BufferOffset
+Assembler::as_divd(FloatRegister fd, FloatRegister fs, FloatRegister ft)
+{
+ return writeInst(InstReg(op_cop1, rs_d, ft, fs, fd, ff_div_fmt).encode());
+}
+
+BufferOffset
+Assembler::as_sqrts(FloatRegister fd, FloatRegister fs)
+{
+ return writeInst(InstReg(op_cop1, rs_s, zero, fs, fd, ff_sqrt_fmt).encode());
+}
+
+BufferOffset
+Assembler::as_sqrtd(FloatRegister fd, FloatRegister fs)
+{
+ return writeInst(InstReg(op_cop1, rs_d, zero, fs, fd, ff_sqrt_fmt).encode());
+}
+
+// FP compare instructions
+BufferOffset
+Assembler::as_cf(FloatFormat fmt, FloatRegister fs, FloatRegister ft, FPConditionBit fcc)
+{
+ RSField rs = fmt == DoubleFloat ? rs_d : rs_s;
+ return writeInst(InstReg(op_cop1, rs, ft, fs, fcc << FccShift, ff_c_f_fmt).encode());
+}
+
+BufferOffset
+Assembler::as_cun(FloatFormat fmt, FloatRegister fs, FloatRegister ft, FPConditionBit fcc)
+{
+ RSField rs = fmt == DoubleFloat ? rs_d : rs_s;
+ return writeInst(InstReg(op_cop1, rs, ft, fs, fcc << FccShift, ff_c_un_fmt).encode());
+}
+
+BufferOffset
+Assembler::as_ceq(FloatFormat fmt, FloatRegister fs, FloatRegister ft, FPConditionBit fcc)
+{
+ RSField rs = fmt == DoubleFloat ? rs_d : rs_s;
+ return writeInst(InstReg(op_cop1, rs, ft, fs, fcc << FccShift, ff_c_eq_fmt).encode());
+}
+
+BufferOffset
+Assembler::as_cueq(FloatFormat fmt, FloatRegister fs, FloatRegister ft, FPConditionBit fcc)
+{
+ RSField rs = fmt == DoubleFloat ? rs_d : rs_s;
+ return writeInst(InstReg(op_cop1, rs, ft, fs, fcc << FccShift, ff_c_ueq_fmt).encode());
+}
+
+BufferOffset
+Assembler::as_colt(FloatFormat fmt, FloatRegister fs, FloatRegister ft, FPConditionBit fcc)
+{
+ RSField rs = fmt == DoubleFloat ? rs_d : rs_s;
+ return writeInst(InstReg(op_cop1, rs, ft, fs, fcc << FccShift, ff_c_olt_fmt).encode());
+}
+
+BufferOffset
+Assembler::as_cult(FloatFormat fmt, FloatRegister fs, FloatRegister ft, FPConditionBit fcc)
+{
+ RSField rs = fmt == DoubleFloat ? rs_d : rs_s;
+ return writeInst(InstReg(op_cop1, rs, ft, fs, fcc << FccShift, ff_c_ult_fmt).encode());
+}
+
+BufferOffset
+Assembler::as_cole(FloatFormat fmt, FloatRegister fs, FloatRegister ft, FPConditionBit fcc)
+{
+ RSField rs = fmt == DoubleFloat ? rs_d : rs_s;
+ return writeInst(InstReg(op_cop1, rs, ft, fs, fcc << FccShift, ff_c_ole_fmt).encode());
+}
+
+BufferOffset
+Assembler::as_cule(FloatFormat fmt, FloatRegister fs, FloatRegister ft, FPConditionBit fcc)
+{
+ RSField rs = fmt == DoubleFloat ? rs_d : rs_s;
+ return writeInst(InstReg(op_cop1, rs, ft, fs, fcc << FccShift, ff_c_ule_fmt).encode());
+}
+
+
+void
+Assembler::bind(Label *label, BufferOffset boff)
+{
+ // If our caller didn't give us an explicit target to bind to
+ // then we want to bind to the location of the next instruction
+ BufferOffset dest = boff.assigned() ? boff : nextOffset();
+ if (label->used()) {
+ int32_t next;
+
+ // A used label holds a link to branch that uses it.
+ BufferOffset b(label);
+ do {
+ Instruction *inst = editSrc(b);
+
+ // Second word holds a pointer to the next branch in label's chain.
+ next = inst[1].encode();
+ bind(reinterpret_cast<InstImm *>(inst), b.getOffset(), dest.getOffset());
+
+ b = BufferOffset(next);
+ } while (next != LabelBase::INVALID_OFFSET);
+ }
+ label->bind(dest.getOffset());
+}
+
+void
+Assembler::bind(InstImm *inst, uint32_t branch, uint32_t target)
+{
+ int32_t offset = target - branch;
+ InstImm inst_bgezal = InstImm(op_regimm, zero, rt_bgezal, BOffImm16(0));
+ InstImm inst_beq = InstImm(op_beq, zero, zero, BOffImm16(0));
+
+ // If encoded offset is 4, then the jump must be short
+ if (BOffImm16(inst[0]).decode() == 4) {
+ JS_ASSERT(BOffImm16::isInRange(offset));
+ inst[0].setBOffImm16(BOffImm16(offset));
+ inst[1].makeNop();
+ return;
+ }
+ if (BOffImm16::isInRange(offset)) {
+ bool conditional = (inst[0].encode() != inst_bgezal.encode() &&
+ inst[0].encode() != inst_beq.encode());
+
+ inst[0].setBOffImm16(BOffImm16(offset));
+ inst[1].makeNop();
+
+ // Skip the trailing nops in conditional branches.
+ if (conditional) {
+ inst[2] = InstImm(op_regimm, zero, rt_bgez, BOffImm16(3 * sizeof(void *))).encode();
+ // There are 2 nops after this
+ }
+ return;
+ }
+
+ if (inst[0].encode() == inst_bgezal.encode()) {
+ // Handle long call.
+ addLongJump(BufferOffset(branch));
+ writeLuiOriInstructions(inst, &inst[1], ScratchRegister, target);
+ inst[2] = InstReg(op_special, ScratchRegister, zero, ra, ff_jalr).encode();
+ // There is 1 nop after this.
+ } else if (inst[0].encode() == inst_beq.encode()) {
+ // Handle long unconditional jump.
+ addLongJump(BufferOffset(branch));
+ writeLuiOriInstructions(inst, &inst[1], ScratchRegister, target);
+ inst[2] = InstReg(op_special, ScratchRegister, zero, zero, ff_jr).encode();
+ // There is 1 nop after this.
+ } else {
+ // Handle long conditional jump.
+ inst[0] = invertBranch(inst[0], BOffImm16(5 * sizeof(void *)));
+ // No need for a "nop" here because we can clobber scratch.
+ addLongJump(BufferOffset(branch + sizeof(void *)));
+ writeLuiOriInstructions(&inst[1], &inst[2], ScratchRegister, target);
+ inst[3] = InstReg(op_special, ScratchRegister, zero, zero, ff_jr).encode();
+ // There is 1 nop after this.
+ }
+}
+
+void
+Assembler::bind(RepatchLabel *label)
+{
+ BufferOffset dest = nextOffset();
+ if (label->used()) {
+ // If the label has a use, then change this use to refer to
+        // the bound label.
+ BufferOffset b(label->offset());
+ Instruction *inst1 = editSrc(b);
+ Instruction *inst2 = inst1->next();
+
+ updateLuiOriValue(inst1, inst2, dest.getOffset());
+ }
+ label->bind(dest.getOffset());
+}
+
+void
+Assembler::retarget(Label *label, Label *target)
+{
+ if (label->used()) {
+ if (target->bound()) {
+ bind(label, BufferOffset(target));
+ } else if (target->used()) {
+ // The target is not bound but used. Prepend label's branch list
+ // onto target's.
+ int32_t next;
+ BufferOffset labelBranchOffset(label);
+
+ // Find the head of the use chain for label.
+ do {
+ Instruction *inst = editSrc(labelBranchOffset);
+
+ // Second word holds a pointer to the next branch in chain.
+ next = inst[1].encode();
+ labelBranchOffset = BufferOffset(next);
+ } while (next != LabelBase::INVALID_OFFSET);
+
+ // Then patch the head of label's use chain to the tail of
+ // target's use chain, prepending the entire use chain of target.
+ Instruction *inst = editSrc(labelBranchOffset);
+ int32_t prev = target->use(label->offset());
+ inst[1].setData(prev);
+ } else {
+ // The target is unbound and unused. We can just take the head of
+ // the list hanging off of label, and dump that into target.
+ DebugOnly<uint32_t> prev = target->use(label->offset());
+ JS_ASSERT((int32_t)prev == Label::INVALID_OFFSET);
+ }
+ }
+ label->reset();
+}
+
+void dbg_break() {}
+static int stopBKPT = -1;
+void
+Assembler::as_break(uint32_t code)
+{
+ JS_ASSERT(code <= MAX_BREAK_CODE);
+ writeInst(op_special | code << RTShift | ff_break);
+}
+
+uint32_t
+Assembler::patchWrite_NearCallSize()
+{
+ return 4 * sizeof(uint32_t);
+}
+
+void
+Assembler::patchWrite_NearCall(CodeLocationLabel start, CodeLocationLabel toCall)
+{
+ Instruction *inst = (Instruction *) start.raw();
+ uint8_t *dest = toCall.raw();
+
+ // Overwrite whatever instruction used to be here with a call.
+ // Always use long jump for two reasons:
+ // - Jump has to be the same size because of patchWrite_NearCallSize.
+ // - Return address has to be at the end of replaced block.
+ // Short jump wouldn't be more efficient.
+ writeLuiOriInstructions(inst, &inst[1], ScratchRegister, (uint32_t)dest);
+ inst[2] = InstReg(op_special, ScratchRegister, zero, ra, ff_jalr);
+ inst[3] = InstNOP();
+
+ // Ensure everyone sees the code that was just written into memory.
+ AutoFlushICache::flush(uintptr_t(inst), patchWrite_NearCallSize());
+}
+
+uint32_t
+Assembler::extractLuiOriValue(Instruction *inst0, Instruction *inst1)
+{
+ InstImm *i0 = (InstImm *) inst0;
+ InstImm *i1 = (InstImm *) inst1;
+ JS_ASSERT(i0->extractOpcode() == ((uint32_t)op_lui >> OpcodeShift));
+ JS_ASSERT(i1->extractOpcode() == ((uint32_t)op_ori >> OpcodeShift));
+
+ uint32_t value = i0->extractImm16Value() << 16;
+ value = value | i1->extractImm16Value();
+ return value;
+}
+
+void
+Assembler::updateLuiOriValue(Instruction *inst0, Instruction *inst1, uint32_t value)
+{
+ JS_ASSERT(inst0->extractOpcode() == ((uint32_t)op_lui >> OpcodeShift));
+ JS_ASSERT(inst1->extractOpcode() == ((uint32_t)op_ori >> OpcodeShift));
+
+ ((InstImm *) inst0)->setImm16(Imm16::upper(Imm32(value)));
+ ((InstImm *) inst1)->setImm16(Imm16::lower(Imm32(value)));
+}
+
+void
+Assembler::writeLuiOriInstructions(Instruction *inst0, Instruction *inst1,
+ Register reg, uint32_t value)
+{
+ *inst0 = InstImm(op_lui, zero, reg, Imm16::upper(Imm32(value)));
+ *inst1 = InstImm(op_ori, reg, reg, Imm16::lower(Imm32(value)));
+}
+
+void
+Assembler::patchDataWithValueCheck(CodeLocationLabel label, PatchedImmPtr newValue,
+ PatchedImmPtr expectedValue)
+{
+ Instruction *inst = (Instruction *) label.raw();
+
+ // Extract old Value
+ DebugOnly<uint32_t> value = Assembler::extractLuiOriValue(&inst[0], &inst[1]);
+ JS_ASSERT(value == uint32_t(expectedValue.value));
+
+ // Replace with new value
+ Assembler::updateLuiOriValue(inst, inst->next(), uint32_t(newValue.value));
+
+ AutoFlushICache::flush(uintptr_t(inst), 8);
+}
+
+void
+Assembler::patchDataWithValueCheck(CodeLocationLabel label, ImmPtr newValue, ImmPtr expectedValue)
+{
+ patchDataWithValueCheck(label, PatchedImmPtr(newValue.value),
+ PatchedImmPtr(expectedValue.value));
+}
+
+// This just stomps over memory with 32 bits of raw data. Its purpose is to
+// overwrite the call of JITed code with 32 bits worth of an offset. This
+// is only meant to function on code that has been invalidated, so it should
+// be totally safe. Since that instruction will never be executed again, an
+// ICache flush should not be necessary.
+void
+Assembler::patchWrite_Imm32(CodeLocationLabel label, Imm32 imm)
+{
+ // Raw is going to be the return address.
+ uint32_t *raw = (uint32_t*)label.raw();
+ // Overwrite the 4 bytes before the return address, which will
+ // end up being the call instruction.
+ *(raw - 1) = imm.value;
+}
+
+uint8_t *
+Assembler::nextInstruction(uint8_t *inst_, uint32_t *count)
+{
+ Instruction *inst = reinterpret_cast<Instruction*>(inst_);
+ if (count != nullptr)
+ *count += sizeof(Instruction);
+ return reinterpret_cast<uint8_t*>(inst->next());
+}
+
+// Since there are no pools in MIPS implementation, this should be simple.
+Instruction *
+Instruction::next()
+{
+ return this + 1;
+}
+
+InstImm Assembler::invertBranch(InstImm branch, BOffImm16 skipOffset)
+{
+ uint32_t rt = 0;
+ Opcode op = (Opcode) (branch.extractOpcode() << OpcodeShift);
+ switch(op) {
+ case op_beq:
+ branch.setBOffImm16(skipOffset);
+ branch.setOpcode(op_bne);
+ return branch;
+ case op_bne:
+ branch.setBOffImm16(skipOffset);
+ branch.setOpcode(op_beq);
+ return branch;
+ case op_bgtz:
+ branch.setBOffImm16(skipOffset);
+ branch.setOpcode(op_blez);
+ return branch;
+ case op_blez:
+ branch.setBOffImm16(skipOffset);
+ branch.setOpcode(op_bgtz);
+ return branch;
+ case op_regimm:
+ branch.setBOffImm16(skipOffset);
+ rt = branch.extractRT();
+ if (rt == (rt_bltz >> RTShift)) {
+ branch.setRT(rt_bgez);
+ return branch;
+ }
+ if (rt == (rt_bgez >> RTShift)) {
+ branch.setRT(rt_bltz);
+ return branch;
+ }
+
+ MOZ_ASSUME_UNREACHABLE("Error creating long branch.");
+ return branch;
+
+ case op_cop1:
+ JS_ASSERT(branch.extractRS() == rs_bc1 >> RSShift);
+
+ branch.setBOffImm16(skipOffset);
+ rt = branch.extractRT();
+ if (rt & 0x1)
+ branch.setRT((RTField) ((rt & ~0x1) << RTShift));
+ else
+ branch.setRT((RTField) ((rt | 0x1) << RTShift));
+ return branch;
+ }
+
+ MOZ_ASSUME_UNREACHABLE("Error creating long branch.");
+ return branch;
+}
+
+void
+Assembler::ToggleToJmp(CodeLocationLabel inst_)
+{
+ InstImm * inst = (InstImm *)inst_.raw();
+
+ JS_ASSERT(inst->extractOpcode() == ((uint32_t)op_andi >> OpcodeShift));
+ // We converted beq to andi, so now we restore it.
+ inst->setOpcode(op_beq);
+
+ AutoFlushICache::flush(uintptr_t(inst), 4);
+}
+
+void
+Assembler::ToggleToCmp(CodeLocationLabel inst_)
+{
+ InstImm * inst = (InstImm *)inst_.raw();
+
+    // toggledJump is always used for short jumps.
+ JS_ASSERT(inst->extractOpcode() == ((uint32_t)op_beq >> OpcodeShift));
+ // Replace "beq $zero, $zero, offset" with "andi $zero, $zero, offset"
+ inst->setOpcode(op_andi);
+
+ AutoFlushICache::flush(uintptr_t(inst), 4);
+}
+
+void
+Assembler::ToggleCall(CodeLocationLabel inst_, bool enabled)
+{
+ Instruction *inst = (Instruction *)inst_.raw();
+ InstImm *i0 = (InstImm *) inst;
+ InstImm *i1 = (InstImm *) i0->next();
+ Instruction *i2 = (Instruction *) i1->next();
+
+ JS_ASSERT(i0->extractOpcode() == ((uint32_t)op_lui >> OpcodeShift));
+ JS_ASSERT(i1->extractOpcode() == ((uint32_t)op_ori >> OpcodeShift));
+
+ if (enabled) {
+ InstReg jalr = InstReg(op_special, ScratchRegister, zero, ra, ff_jalr);
+ *i2 = jalr;
+ } else {
+ InstNOP nop;
+ *i2 = nop;
+ }
+
+ AutoFlushICache::flush(uintptr_t(i2), 4);
+}
+
+void Assembler::updateBoundsCheck(uint32_t heapSize, Instruction *inst)
+{
+ MOZ_ASSUME_UNREACHABLE("NYI");
+}
diff --git a/src/third_party/mozjs/js/src/jit/mips/Assembler-mips.h b/src/third_party/mozjs/js/src/jit/mips/Assembler-mips.h
new file mode 100644
index 0000000..c5ce1f1
--- /dev/null
+++ b/src/third_party/mozjs/js/src/jit/mips/Assembler-mips.h
@@ -0,0 +1,1255 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_mips_Assembler_mips_h
+#define jit_mips_Assembler_mips_h
+
+#include "mozilla/ArrayUtils.h"
+#include "mozilla/Attributes.h"
+#include "mozilla/MathAlgorithms.h"
+
+#include "jit/CompactBuffer.h"
+#include "jit/IonCode.h"
+#include "jit/IonSpewer.h"
+#include "jit/mips/Architecture-mips.h"
+#include "jit/shared/Assembler-shared.h"
+#include "jit/shared/IonAssemblerBuffer.h"
+
+namespace js {
+namespace jit {
+
+static MOZ_CONSTEXPR_VAR Register zero = { Registers::zero };
+static MOZ_CONSTEXPR_VAR Register at = { Registers::at };
+static MOZ_CONSTEXPR_VAR Register v0 = { Registers::v0 };
+static MOZ_CONSTEXPR_VAR Register v1 = { Registers::v1 };
+static MOZ_CONSTEXPR_VAR Register a0 = { Registers::a0 };
+static MOZ_CONSTEXPR_VAR Register a1 = { Registers::a1 };
+static MOZ_CONSTEXPR_VAR Register a2 = { Registers::a2 };
+static MOZ_CONSTEXPR_VAR Register a3 = { Registers::a3 };
+static MOZ_CONSTEXPR_VAR Register t0 = { Registers::t0 };
+static MOZ_CONSTEXPR_VAR Register t1 = { Registers::t1 };
+static MOZ_CONSTEXPR_VAR Register t2 = { Registers::t2 };
+static MOZ_CONSTEXPR_VAR Register t3 = { Registers::t3 };
+static MOZ_CONSTEXPR_VAR Register t4 = { Registers::t4 };
+static MOZ_CONSTEXPR_VAR Register t5 = { Registers::t5 };
+static MOZ_CONSTEXPR_VAR Register t6 = { Registers::t6 };
+static MOZ_CONSTEXPR_VAR Register t7 = { Registers::t7 };
+static MOZ_CONSTEXPR_VAR Register s0 = { Registers::s0 };
+static MOZ_CONSTEXPR_VAR Register s1 = { Registers::s1 };
+static MOZ_CONSTEXPR_VAR Register s2 = { Registers::s2 };
+static MOZ_CONSTEXPR_VAR Register s3 = { Registers::s3 };
+static MOZ_CONSTEXPR_VAR Register s4 = { Registers::s4 };
+static MOZ_CONSTEXPR_VAR Register s5 = { Registers::s5 };
+static MOZ_CONSTEXPR_VAR Register s6 = { Registers::s6 };
+static MOZ_CONSTEXPR_VAR Register s7 = { Registers::s7 };
+static MOZ_CONSTEXPR_VAR Register t8 = { Registers::t8 };
+static MOZ_CONSTEXPR_VAR Register t9 = { Registers::t9 };
+static MOZ_CONSTEXPR_VAR Register k0 = { Registers::k0 };
+static MOZ_CONSTEXPR_VAR Register k1 = { Registers::k1 };
+static MOZ_CONSTEXPR_VAR Register gp = { Registers::gp };
+static MOZ_CONSTEXPR_VAR Register sp = { Registers::sp };
+static MOZ_CONSTEXPR_VAR Register fp = { Registers::fp };
+static MOZ_CONSTEXPR_VAR Register ra = { Registers::ra };
+
+static MOZ_CONSTEXPR_VAR Register ScratchRegister = at;
+static MOZ_CONSTEXPR_VAR Register SecondScratchReg = t8;
+
+// Use arg reg from EnterJIT function as OsrFrameReg.
+static MOZ_CONSTEXPR_VAR Register OsrFrameReg = a3;
+static MOZ_CONSTEXPR_VAR Register ArgumentsRectifierReg = s3;
+static MOZ_CONSTEXPR_VAR Register CallTempReg0 = t0;
+static MOZ_CONSTEXPR_VAR Register CallTempReg1 = t1;
+static MOZ_CONSTEXPR_VAR Register CallTempReg2 = t2;
+static MOZ_CONSTEXPR_VAR Register CallTempReg3 = t3;
+static MOZ_CONSTEXPR_VAR Register CallTempReg4 = t4;
+static MOZ_CONSTEXPR_VAR Register CallTempReg5 = t5;
+
+static MOZ_CONSTEXPR_VAR Register IntArgReg0 = a0;
+static MOZ_CONSTEXPR_VAR Register IntArgReg1 = a1;
+static MOZ_CONSTEXPR_VAR Register IntArgReg2 = a2;
+static MOZ_CONSTEXPR_VAR Register IntArgReg3 = a3;
+static MOZ_CONSTEXPR_VAR Register GlobalReg = s6; // used by Odin
+static MOZ_CONSTEXPR_VAR Register HeapReg = s7; // used by Odin
+static MOZ_CONSTEXPR_VAR Register CallTempNonArgRegs[] = { t0, t1, t2, t3, t4 };
+static const uint32_t NumCallTempNonArgRegs = mozilla::ArrayLength(CallTempNonArgRegs);
+
+class ABIArgGenerator
+{
+ unsigned usedArgSlots_;
+ bool firstArgFloat;
+ ABIArg current_;
+
+ public:
+ ABIArgGenerator();
+ ABIArg next(MIRType argType);
+ ABIArg &current() { return current_; }
+
+ uint32_t stackBytesConsumedSoFar() const {
+ if (usedArgSlots_ <= 4)
+ return 4 * sizeof(intptr_t);
+
+ return usedArgSlots_ * sizeof(intptr_t);
+ }
+
+ static const Register NonArgReturnVolatileReg0;
+ static const Register NonArgReturnVolatileReg1;
+};
+
+static MOZ_CONSTEXPR_VAR Register PreBarrierReg = a1;
+
+static MOZ_CONSTEXPR_VAR Register InvalidReg = { Registers::invalid_reg };
+static MOZ_CONSTEXPR_VAR FloatRegister InvalidFloatReg = { FloatRegisters::invalid_freg };
+
+static MOZ_CONSTEXPR_VAR Register JSReturnReg_Type = v1;
+static MOZ_CONSTEXPR_VAR Register JSReturnReg_Data = v0;
+static MOZ_CONSTEXPR_VAR Register StackPointer = sp;
+static MOZ_CONSTEXPR_VAR Register FramePointer = fp;
+static MOZ_CONSTEXPR_VAR Register ReturnReg = v0;
+static MOZ_CONSTEXPR_VAR FloatRegister ReturnFloatReg = { FloatRegisters::f0 };
+static MOZ_CONSTEXPR_VAR FloatRegister ScratchFloatReg = { FloatRegisters::f18 };
+static MOZ_CONSTEXPR_VAR FloatRegister SecondScratchFloatReg = { FloatRegisters::f16 };
+
+static MOZ_CONSTEXPR_VAR FloatRegister NANReg = { FloatRegisters::f30 };
+
+static MOZ_CONSTEXPR_VAR FloatRegister f0 = {FloatRegisters::f0};
+static MOZ_CONSTEXPR_VAR FloatRegister f2 = {FloatRegisters::f2};
+static MOZ_CONSTEXPR_VAR FloatRegister f4 = {FloatRegisters::f4};
+static MOZ_CONSTEXPR_VAR FloatRegister f6 = {FloatRegisters::f6};
+static MOZ_CONSTEXPR_VAR FloatRegister f8 = {FloatRegisters::f8};
+static MOZ_CONSTEXPR_VAR FloatRegister f10 = {FloatRegisters::f10};
+static MOZ_CONSTEXPR_VAR FloatRegister f12 = {FloatRegisters::f12};
+static MOZ_CONSTEXPR_VAR FloatRegister f14 = {FloatRegisters::f14};
+static MOZ_CONSTEXPR_VAR FloatRegister f16 = {FloatRegisters::f16};
+static MOZ_CONSTEXPR_VAR FloatRegister f18 = {FloatRegisters::f18};
+static MOZ_CONSTEXPR_VAR FloatRegister f20 = {FloatRegisters::f20};
+static MOZ_CONSTEXPR_VAR FloatRegister f22 = {FloatRegisters::f22};
+static MOZ_CONSTEXPR_VAR FloatRegister f24 = {FloatRegisters::f24};
+static MOZ_CONSTEXPR_VAR FloatRegister f26 = {FloatRegisters::f26};
+static MOZ_CONSTEXPR_VAR FloatRegister f28 = {FloatRegisters::f28};
+static MOZ_CONSTEXPR_VAR FloatRegister f30 = {FloatRegisters::f30};
+
+// MIPS CPUs can only load multibyte data that is "naturally"
+// four-byte-aligned, sp register should be eight-byte-aligned.
+static const uint32_t StackAlignment = 8;
+static const uint32_t CodeAlignment = 4;
+static const bool StackKeptAligned = true;
+// NativeFrameSize is the size of return address on stack in AsmJS functions.
+static const uint32_t NativeFrameSize = sizeof(void*);
+static const uint32_t AlignmentAtPrologue = 0;
+static const uint32_t AlignmentMidPrologue = NativeFrameSize;
+
+static const Scale ScalePointer = TimesFour;
+
+// MIPS instruction types
+// +---------------------------------------------------------------+
+// | 6 | 5 | 5 | 5 | 5 | 6 |
+// +---------------------------------------------------------------+
+// Register type | Opcode | Rs | Rt | Rd | Sa | Function |
+// +---------------------------------------------------------------+
+// | 6 | 5 | 5 | 16 |
+// +---------------------------------------------------------------+
+// Immediate type | Opcode | Rs | Rt | 2's complement constant |
+// +---------------------------------------------------------------+
+// | 6 | 26 |
+// +---------------------------------------------------------------+
+// Jump type | Opcode | jump_target |
+// +---------------------------------------------------------------+
+// 31 bit bit 0
+
+// MIPS instruction encoding constants.
+static const uint32_t OpcodeShift = 26;
+static const uint32_t OpcodeBits = 6;
+static const uint32_t RSShift = 21;
+static const uint32_t RSBits = 5;
+static const uint32_t RTShift = 16;
+static const uint32_t RTBits = 5;
+static const uint32_t RDShift = 11;
+static const uint32_t RDBits = 5;
+static const uint32_t SAShift = 6;
+static const uint32_t SABits = 5;
+static const uint32_t FunctionShift = 0;
+static const uint32_t FunctionBits = 6;
+static const uint32_t Imm16Shift = 0;
+static const uint32_t Imm16Bits = 16;
+static const uint32_t Imm26Shift = 0;
+static const uint32_t Imm26Bits = 26;
+static const uint32_t Imm28Shift = 0;
+static const uint32_t Imm28Bits = 28;
+static const uint32_t ImmFieldShift = 2;
+static const uint32_t FccMask = 0x7;
+static const uint32_t FccShift = 2;
+
+
+// MIPS instruction field bit masks.
+static const uint32_t OpcodeMask = ((1 << OpcodeBits) - 1) << OpcodeShift;
+static const uint32_t Imm16Mask = ((1 << Imm16Bits) - 1) << Imm16Shift;
+static const uint32_t Imm26Mask = ((1 << Imm26Bits) - 1) << Imm26Shift;
+static const uint32_t Imm28Mask = ((1 << Imm28Bits) - 1) << Imm28Shift;
+static const uint32_t RSMask = ((1 << RSBits) - 1) << RSShift;
+static const uint32_t RTMask = ((1 << RTBits) - 1) << RTShift;
+static const uint32_t RDMask = ((1 << RDBits) - 1) << RDShift;
+static const uint32_t SAMask = ((1 << SABits) - 1) << SAShift;
+static const uint32_t FunctionMask = ((1 << FunctionBits) - 1) << FunctionShift;
+static const uint32_t RegMask = Registers::Total - 1;
+static const uint32_t StackAlignmentMask = StackAlignment - 1;
+
+static const int32_t MAX_BREAK_CODE = 1024 - 1;
+
+class Instruction;
+class InstReg;
+class InstImm;
+class InstJump;
+class BranchInstBlock;
+
+uint32_t RS(Register r);
+uint32_t RT(Register r);
+uint32_t RT(uint32_t regCode);
+uint32_t RT(FloatRegister r);
+uint32_t RD(Register r);
+uint32_t RD(FloatRegister r);
+uint32_t RD(uint32_t regCode);
+uint32_t SA(uint32_t value);
+uint32_t SA(FloatRegister r);
+
+Register toRS (Instruction &i);
+Register toRT (Instruction &i);
+Register toRD (Instruction &i);
+Register toR (Instruction &i);
+
+// MIPS enums for instruction fields
+enum Opcode {
+ op_special = 0 << OpcodeShift,
+ op_regimm = 1 << OpcodeShift,
+
+ op_j = 2 << OpcodeShift,
+ op_jal = 3 << OpcodeShift,
+ op_beq = 4 << OpcodeShift,
+ op_bne = 5 << OpcodeShift,
+ op_blez = 6 << OpcodeShift,
+ op_bgtz = 7 << OpcodeShift,
+
+ op_addi = 8 << OpcodeShift,
+ op_addiu = 9 << OpcodeShift,
+ op_slti = 10 << OpcodeShift,
+ op_sltiu = 11 << OpcodeShift,
+ op_andi = 12 << OpcodeShift,
+ op_ori = 13 << OpcodeShift,
+ op_xori = 14 << OpcodeShift,
+ op_lui = 15 << OpcodeShift,
+
+ op_cop1 = 17 << OpcodeShift,
+ op_cop1x = 19 << OpcodeShift,
+
+ op_beql = 20 << OpcodeShift,
+ op_bnel = 21 << OpcodeShift,
+ op_blezl = 22 << OpcodeShift,
+ op_bgtzl = 23 << OpcodeShift,
+
+ op_special2 = 28 << OpcodeShift,
+ op_special3 = 31 << OpcodeShift,
+
+ op_lb = 32 << OpcodeShift,
+ op_lh = 33 << OpcodeShift,
+ op_lwl = 34 << OpcodeShift,
+ op_lw = 35 << OpcodeShift,
+ op_lbu = 36 << OpcodeShift,
+ op_lhu = 37 << OpcodeShift,
+ op_lwr = 38 << OpcodeShift,
+ op_sb = 40 << OpcodeShift,
+ op_sh = 41 << OpcodeShift,
+ op_swl = 42 << OpcodeShift,
+ op_sw = 43 << OpcodeShift,
+ op_swr = 46 << OpcodeShift,
+
+ op_lwc1 = 49 << OpcodeShift,
+ op_ldc1 = 53 << OpcodeShift,
+
+ op_swc1 = 57 << OpcodeShift,
+ op_sdc1 = 61 << OpcodeShift
+};
+
+enum RSField {
+ rs_zero = 0 << RSShift,
+ // cop1 encoding of RS field.
+ rs_mfc1 = 0 << RSShift,
+ rs_one = 1 << RSShift,
+ rs_cfc1 = 2 << RSShift,
+ rs_mfhc1 = 3 << RSShift,
+ rs_mtc1 = 4 << RSShift,
+ rs_ctc1 = 6 << RSShift,
+ rs_mthc1 = 7 << RSShift,
+ rs_bc1 = 8 << RSShift,
+ rs_s = 16 << RSShift,
+ rs_d = 17 << RSShift,
+ rs_w = 20 << RSShift,
+ rs_ps = 22 << RSShift
+};
+
+enum RTField {
+ rt_zero = 0 << RTShift,
+ // regimm encoding of RT field.
+ rt_bltz = 0 << RTShift,
+ rt_bgez = 1 << RTShift,
+ rt_bltzal = 16 << RTShift,
+ rt_bgezal = 17 << RTShift
+};
+
+enum FunctionField {
+ // special encoding of function field.
+ ff_sll = 0,
+ ff_movci = 1,
+ ff_srl = 2,
+ ff_sra = 3,
+ ff_sllv = 4,
+ ff_srlv = 6,
+ ff_srav = 7,
+
+ ff_jr = 8,
+ ff_jalr = 9,
+ ff_movz = 10,
+ ff_movn = 11,
+ ff_break = 13,
+
+ ff_mfhi = 16,
+ ff_mflo = 18,
+
+ ff_mult = 24,
+ ff_multu = 25,
+ ff_div = 26,
+ ff_divu = 27,
+
+ ff_add = 32,
+ ff_addu = 33,
+ ff_sub = 34,
+ ff_subu = 35,
+ ff_and = 36,
+ ff_or = 37,
+ ff_xor = 38,
+ ff_nor = 39,
+
+ ff_slt = 42,
+ ff_sltu = 43,
+
+ // special2 encoding of function field.
+ ff_mul = 2,
+ ff_clz = 32,
+ ff_clo = 33,
+
+ // special3 encoding of function field.
+ ff_ext = 0,
+ ff_ins = 4,
+
+ // cop1 encoding of function field.
+ ff_add_fmt = 0,
+ ff_sub_fmt = 1,
+ ff_mul_fmt = 2,
+ ff_div_fmt = 3,
+ ff_sqrt_fmt = 4,
+ ff_abs_fmt = 5,
+ ff_mov_fmt = 6,
+ ff_neg_fmt = 7,
+
+ ff_round_w_fmt = 12,
+ ff_trunc_w_fmt = 13,
+ ff_ceil_w_fmt = 14,
+ ff_floor_w_fmt = 15,
+
+ ff_cvt_s_fmt = 32,
+ ff_cvt_d_fmt = 33,
+ ff_cvt_w_fmt = 36,
+
+ ff_c_f_fmt = 48,
+ ff_c_un_fmt = 49,
+ ff_c_eq_fmt = 50,
+ ff_c_ueq_fmt = 51,
+ ff_c_olt_fmt = 52,
+ ff_c_ult_fmt = 53,
+ ff_c_ole_fmt = 54,
+ ff_c_ule_fmt = 55,
+};
+
+class MacroAssemblerMIPS;
+class Operand;
+
+// A BOffImm16 is a 16 bit immediate that is used for branches.
+class BOffImm16
+{
+ uint32_t data;
+
+ public:
+ uint32_t encode() {
+ JS_ASSERT(!isInvalid());
+ return data;
+ }
+ int32_t decode() {
+ JS_ASSERT(!isInvalid());
+ return (int32_t(data << 18) >> 16) + 4;
+ }
+
+ explicit BOffImm16(int offset)
+ : data ((offset - 4) >> 2 & Imm16Mask)
+ {
+ JS_ASSERT((offset & 0x3) == 0);
+ JS_ASSERT(isInRange(offset));
+ }
+ static bool isInRange(int offset) {
+ if ((offset - 4) < (INT16_MIN << 2))
+ return false;
+ if ((offset - 4) > (INT16_MAX << 2))
+ return false;
+ return true;
+ }
+ static const uint32_t INVALID = 0x00020000;
+ BOffImm16()
+ : data(INVALID)
+ { }
+
+ bool isInvalid() {
+ return data == INVALID;
+ }
+ Instruction *getDest(Instruction *src);
+
+ BOffImm16(InstImm inst);
+};
+
+// A JOffImm26 is a 26 bit immediate that is used for unconditional jumps.
+class JOffImm26
+{
+ uint32_t data;
+
+ public:
+ uint32_t encode() {
+ JS_ASSERT(!isInvalid());
+ return data;
+ }
+ int32_t decode() {
+ JS_ASSERT(!isInvalid());
+ return (int32_t(data << 8) >> 6) + 4;
+ }
+
+ explicit JOffImm26(int offset)
+ : data ((offset - 4) >> 2 & Imm26Mask)
+ {
+ JS_ASSERT((offset & 0x3) == 0);
+ JS_ASSERT(isInRange(offset));
+ }
+ static bool isInRange(int offset) {
+ if ((offset - 4) < -536870912)
+ return false;
+ if ((offset - 4) > 536870908)
+ return false;
+ return true;
+ }
+ static const uint32_t INVALID = 0x20000000;
+ JOffImm26()
+ : data(INVALID)
+ { }
+
+ bool isInvalid() {
+ return data == INVALID;
+ }
+ Instruction *getDest(Instruction *src);
+
+};
+
+class Imm16
+{
+ uint16_t value;
+
+ public:
+ Imm16();
+ Imm16(uint32_t imm)
+ : value(imm)
+ { }
+ uint32_t encode() {
+ return value;
+ }
+ int32_t decodeSigned() {
+ return value;
+ }
+ uint32_t decodeUnsigned() {
+ return value;
+ }
+ static bool isInSignedRange(int32_t imm) {
+ return imm >= INT16_MIN && imm <= INT16_MAX;
+ }
+ static bool isInUnsignedRange(uint32_t imm) {
+ return imm <= UINT16_MAX ;
+ }
+ static Imm16 lower (Imm32 imm) {
+ return Imm16(imm.value & 0xffff);
+ }
+ static Imm16 upper (Imm32 imm) {
+ return Imm16((imm.value >> 16) & 0xffff);
+ }
+};
+
+class Operand
+{
+ public:
+ enum Tag {
+ REG,
+ FREG,
+ MEM
+ };
+
+ private:
+ Tag tag : 3;
+ uint32_t reg : 5;
+ int32_t offset;
+
+ public:
+ Operand (Register reg_)
+ : tag(REG), reg(reg_.code())
+ { }
+
+ Operand (FloatRegister freg)
+ : tag(FREG), reg(freg.code())
+ { }
+
+ Operand (Register base, Imm32 off)
+ : tag(MEM), reg(base.code()), offset(off.value)
+ { }
+
+ Operand (Register base, int32_t off)
+ : tag(MEM), reg(base.code()), offset(off)
+ { }
+
+ Operand (const Address &addr)
+ : tag(MEM), reg(addr.base.code()), offset(addr.offset)
+ { }
+
+ Tag getTag() const {
+ return tag;
+ }
+
+ Register toReg() const {
+ JS_ASSERT(tag == REG);
+ return Register::FromCode(reg);
+ }
+
+ FloatRegister toFReg() const {
+ JS_ASSERT(tag == FREG);
+ return FloatRegister::FromCode(reg);
+ }
+
+ void toAddr(Register *r, Imm32 *dest) const {
+ JS_ASSERT(tag == MEM);
+ *r = Register::FromCode(reg);
+ *dest = Imm32(offset);
+ }
+ Address toAddress() const {
+ JS_ASSERT(tag == MEM);
+ return Address(Register::FromCode(reg), offset);
+ }
+ int32_t disp() const {
+ JS_ASSERT(tag == MEM);
+ return offset;
+ }
+
+ int32_t base() const {
+ JS_ASSERT(tag == MEM);
+ return reg;
+ }
+ Register baseReg() const {
+ JS_ASSERT(tag == MEM);
+ return Register::FromCode(reg);
+ }
+};
+
+void
+PatchJump(CodeLocationJump &jump_, CodeLocationLabel label);
+class Assembler;
+typedef js::jit::AssemblerBuffer<1024, Instruction> MIPSBuffer;
+
+class Assembler : public AssemblerShared
+{
+ public:
+
+ enum Condition {
+ Equal,
+ NotEqual,
+ Above,
+ AboveOrEqual,
+ Below,
+ BelowOrEqual,
+ GreaterThan,
+ GreaterThanOrEqual,
+ LessThan,
+ LessThanOrEqual,
+ Overflow,
+ Signed,
+ NotSigned,
+ Zero,
+ NonZero,
+ Always,
+ };
+
+ enum DoubleCondition {
+ // These conditions will only evaluate to true if the comparison is ordered - i.e. neither operand is NaN.
+ DoubleOrdered,
+ DoubleEqual,
+ DoubleNotEqual,
+ DoubleGreaterThan,
+ DoubleGreaterThanOrEqual,
+ DoubleLessThan,
+ DoubleLessThanOrEqual,
+ // If either operand is NaN, these conditions always evaluate to true.
+ DoubleUnordered,
+ DoubleEqualOrUnordered,
+ DoubleNotEqualOrUnordered,
+ DoubleGreaterThanOrUnordered,
+ DoubleGreaterThanOrEqualOrUnordered,
+ DoubleLessThanOrUnordered,
+ DoubleLessThanOrEqualOrUnordered
+ };
+
+ enum FPConditionBit {
+ FCC0 = 0,
+ FCC1,
+ FCC2,
+ FCC3,
+ FCC4,
+ FCC5,
+ FCC6,
+ FCC7
+ };
+
+ enum FloatFormat {
+ SingleFloat,
+ DoubleFloat
+ };
+
+ enum JumpOrCall {
+ BranchIsJump,
+ BranchIsCall
+ };
+
+ enum FloatTestKind {
+ TestForTrue,
+ TestForFalse
+ };
+
+ // :( this should be protected, but since CodeGenerator
+ // wants to use it, It needs to go out here :(
+
+ BufferOffset nextOffset() {
+ return m_buffer.nextOffset();
+ }
+
+ protected:
+ Instruction * editSrc (BufferOffset bo) {
+ return m_buffer.getInst(bo);
+ }
+ public:
+ uint32_t actualOffset(uint32_t) const;
+ uint32_t actualIndex(uint32_t) const;
+ static uint8_t *PatchableJumpAddress(JitCode *code, uint32_t index);
+ protected:
+
+ // structure for fixing up pc-relative loads/jumps when a the machine code
+ // gets moved (executable copy, gc, etc.)
+ struct RelativePatch
+ {
+ // the offset within the code buffer where the value is loaded that
+ // we want to fix-up
+ BufferOffset offset;
+ void *target;
+ Relocation::Kind kind;
+
+ RelativePatch(BufferOffset offset, void *target, Relocation::Kind kind)
+ : offset(offset),
+ target(target),
+ kind(kind)
+ { }
+ };
+
+ js::Vector<CodeLabel, 0, SystemAllocPolicy> codeLabels_;
+ js::Vector<RelativePatch, 8, SystemAllocPolicy> jumps_;
+ js::Vector<uint32_t, 8, SystemAllocPolicy> longJumps_;
+
+ CompactBufferWriter jumpRelocations_;
+ CompactBufferWriter dataRelocations_;
+ CompactBufferWriter relocations_;
+ CompactBufferWriter preBarriers_;
+
+ bool enoughMemory_;
+
+ MIPSBuffer m_buffer;
+
+ public:
+ Assembler()
+ : enoughMemory_(true),
+ m_buffer(),
+ isFinished(false)
+ { }
+
+ static Condition InvertCondition(Condition cond);
+ static DoubleCondition InvertCondition(DoubleCondition cond);
+
+ // MacroAssemblers hold onto gcthings, so they are traced by the GC.
+ void trace(JSTracer *trc);
+ void writeRelocation(BufferOffset src) {
+ jumpRelocations_.writeUnsigned(src.getOffset());
+ }
+
+ // As opposed to x86/x64 version, the data relocation has to be executed
+ // before to recover the pointer, and not after.
+ void writeDataRelocation(const ImmGCPtr &ptr) {
+ if (ptr.value)
+ dataRelocations_.writeUnsigned(nextOffset().getOffset());
+ }
+ void writePrebarrierOffset(CodeOffsetLabel label) {
+ preBarriers_.writeUnsigned(label.offset());
+ }
+
+ public:
+ static uintptr_t getPointer(uint8_t *);
+
+ bool oom() const;
+
+ void setPrinter(Sprinter *sp) {
+ }
+
+ private:
+ bool isFinished;
+ public:
+ void finish();
+ void executableCopy(void *buffer);
+ void copyJumpRelocationTable(uint8_t *dest);
+ void copyDataRelocationTable(uint8_t *dest);
+ void copyPreBarrierTable(uint8_t *dest);
+
+ bool addCodeLabel(CodeLabel label);
+ size_t numCodeLabels() const {
+ return codeLabels_.length();
+ }
+ CodeLabel codeLabel(size_t i) {
+ return codeLabels_[i];
+ }
+
+ // Size of the instruction stream, in bytes.
+ size_t size() const;
+ // Size of the jump relocation table, in bytes.
+ size_t jumpRelocationTableBytes() const;
+ size_t dataRelocationTableBytes() const;
+ size_t preBarrierTableBytes() const;
+
+ // Size of the data table, in bytes.
+ size_t bytesNeeded() const;
+
+ // Write a blob of binary into the instruction stream *OR*
+ // into a destination address. If dest is nullptr (the default), then the
+ // instruction gets written into the instruction stream. If dest is not null
+ // it is interpreted as a pointer to the location that we want the
+ // instruction to be written.
+ BufferOffset writeInst(uint32_t x, uint32_t *dest = nullptr);
+ // A static variant for the cases where we don't want to have an assembler
+ // object at all. Normally, you would use the dummy (nullptr) object.
+ static void writeInstStatic(uint32_t x, uint32_t *dest);
+
+ public:
+ BufferOffset align(int alignment);
+ BufferOffset as_nop();
+
+ // Branch and jump instructions
+ BufferOffset as_bal(BOffImm16 off);
+
+ InstImm getBranchCode(JumpOrCall jumpOrCall);
+ InstImm getBranchCode(Register s, Register t, Condition c);
+ InstImm getBranchCode(Register s, Condition c);
+ InstImm getBranchCode(FloatTestKind testKind, FPConditionBit fcc);
+
+ BufferOffset as_j(JOffImm26 off);
+ BufferOffset as_jal(JOffImm26 off);
+
+ BufferOffset as_jr(Register rs);
+ BufferOffset as_jalr(Register rs);
+
+ // Arithmetic instructions
+ BufferOffset as_addu(Register rd, Register rs, Register rt);
+ BufferOffset as_addiu(Register rd, Register rs, int32_t j);
+ BufferOffset as_subu(Register rd, Register rs, Register rt);
+ BufferOffset as_mult(Register rs, Register rt);
+ BufferOffset as_multu(Register rs, Register rt);
+ BufferOffset as_div(Register rs, Register rt);
+ BufferOffset as_divu(Register rs, Register rt);
+ BufferOffset as_mul(Register rd, Register rs, Register rt);
+
+ // Logical instructions
+ BufferOffset as_and(Register rd, Register rs, Register rt);
+ BufferOffset as_or(Register rd, Register rs, Register rt);
+ BufferOffset as_xor(Register rd, Register rs, Register rt);
+ BufferOffset as_nor(Register rd, Register rs, Register rt);
+
+ BufferOffset as_andi(Register rd, Register rs, int32_t j);
+ BufferOffset as_ori(Register rd, Register rs, int32_t j);
+ BufferOffset as_xori(Register rd, Register rs, int32_t j);
+ BufferOffset as_lui(Register rd, int32_t j);
+
+ // Shift instructions
+ // as_sll(zero, zero, x) instructions are reserved as nop
+ BufferOffset as_sll(Register rd, Register rt, uint16_t sa);
+ BufferOffset as_sllv(Register rd, Register rt, Register rs);
+ BufferOffset as_srl(Register rd, Register rt, uint16_t sa);
+ BufferOffset as_srlv(Register rd, Register rt, Register rs);
+ BufferOffset as_sra(Register rd, Register rt, uint16_t sa);
+ BufferOffset as_srav(Register rd, Register rt, Register rs);
+ BufferOffset as_rotr(Register rd, Register rt, uint16_t sa);
+ BufferOffset as_rotrv(Register rd, Register rt, Register rs);
+
+ // Load and store instructions
+ BufferOffset as_lb(Register rd, Register rs, int16_t off);
+ BufferOffset as_lbu(Register rd, Register rs, int16_t off);
+ BufferOffset as_lh(Register rd, Register rs, int16_t off);
+ BufferOffset as_lhu(Register rd, Register rs, int16_t off);
+ BufferOffset as_lw(Register rd, Register rs, int16_t off);
+ BufferOffset as_lwl(Register rd, Register rs, int16_t off);
+ BufferOffset as_lwr(Register rd, Register rs, int16_t off);
+ BufferOffset as_sb(Register rd, Register rs, int16_t off);
+ BufferOffset as_sh(Register rd, Register rs, int16_t off);
+ BufferOffset as_sw(Register rd, Register rs, int16_t off);
+ BufferOffset as_swl(Register rd, Register rs, int16_t off);
+ BufferOffset as_swr(Register rd, Register rs, int16_t off);
+
+ // Move from HI/LO register.
+ BufferOffset as_mfhi(Register rd);
+ BufferOffset as_mflo(Register rd);
+
+ // Set on less than.
+ BufferOffset as_slt(Register rd, Register rs, Register rt);
+ BufferOffset as_sltu(Register rd, Register rs, Register rt);
+ BufferOffset as_slti(Register rd, Register rs, int32_t j);
+ BufferOffset as_sltiu(Register rd, Register rs, uint32_t j);
+
+ // Conditional move.
+ BufferOffset as_movz(Register rd, Register rs, Register rt);
+ BufferOffset as_movn(Register rd, Register rs, Register rt);
+ BufferOffset as_movt(Register rd, Register rs, uint16_t cc = 0);
+ BufferOffset as_movf(Register rd, Register rs, uint16_t cc = 0);
+
+ // Bit twiddling.
+ BufferOffset as_clz(Register rd, Register rs, Register rt = Register::FromCode(0));
+ BufferOffset as_ins(Register rt, Register rs, uint16_t pos, uint16_t size);
+ BufferOffset as_ext(Register rt, Register rs, uint16_t pos, uint16_t size);
+
+ // FP instructions
+
+ // Use these two functions only when you are sure address is aligned.
+ // Otherwise, use ma_ld and ma_sd.
+ BufferOffset as_ld(FloatRegister fd, Register base, int32_t off);
+ BufferOffset as_sd(FloatRegister fd, Register base, int32_t off);
+
+ BufferOffset as_ls(FloatRegister fd, Register base, int32_t off);
+ BufferOffset as_ss(FloatRegister fd, Register base, int32_t off);
+
+ BufferOffset as_movs(FloatRegister fd, FloatRegister fs);
+ BufferOffset as_movd(FloatRegister fd, FloatRegister fs);
+
+ BufferOffset as_mtc1(Register rt, FloatRegister fs);
+ BufferOffset as_mfc1(Register rt, FloatRegister fs);
+
+ protected:
+ // This is used to access the odd register from the pair of single
+ // precision registers that make one double register.
+ FloatRegister getOddPair(FloatRegister reg) {
+ JS_ASSERT(reg.code() % 2 == 0);
+ return FloatRegister::FromCode(reg.code() + 1);
+ }
+
+ public:
+ // FP convert instructions
+ BufferOffset as_ceilws(FloatRegister fd, FloatRegister fs);
+ BufferOffset as_floorws(FloatRegister fd, FloatRegister fs);
+ BufferOffset as_roundws(FloatRegister fd, FloatRegister fs);
+ BufferOffset as_truncws(FloatRegister fd, FloatRegister fs);
+
+ BufferOffset as_ceilwd(FloatRegister fd, FloatRegister fs);
+ BufferOffset as_floorwd(FloatRegister fd, FloatRegister fs);
+ BufferOffset as_roundwd(FloatRegister fd, FloatRegister fs);
+ BufferOffset as_truncwd(FloatRegister fd, FloatRegister fs);
+
+ BufferOffset as_cvtdl(FloatRegister fd, FloatRegister fs);
+ BufferOffset as_cvtds(FloatRegister fd, FloatRegister fs);
+ BufferOffset as_cvtdw(FloatRegister fd, FloatRegister fs);
+ BufferOffset as_cvtld(FloatRegister fd, FloatRegister fs);
+ BufferOffset as_cvtls(FloatRegister fd, FloatRegister fs);
+ BufferOffset as_cvtsd(FloatRegister fd, FloatRegister fs);
+ BufferOffset as_cvtsl(FloatRegister fd, FloatRegister fs);
+ BufferOffset as_cvtsw(FloatRegister fd, FloatRegister fs);
+ BufferOffset as_cvtwd(FloatRegister fd, FloatRegister fs);
+ BufferOffset as_cvtws(FloatRegister fd, FloatRegister fs);
+
+ // FP arithmetic instructions
+ BufferOffset as_adds(FloatRegister fd, FloatRegister fs, FloatRegister ft);
+ BufferOffset as_addd(FloatRegister fd, FloatRegister fs, FloatRegister ft);
+ BufferOffset as_subs(FloatRegister fd, FloatRegister fs, FloatRegister ft);
+ BufferOffset as_subd(FloatRegister fd, FloatRegister fs, FloatRegister ft);
+
+ BufferOffset as_abss(FloatRegister fd, FloatRegister fs);
+ BufferOffset as_absd(FloatRegister fd, FloatRegister fs);
+ BufferOffset as_negd(FloatRegister fd, FloatRegister fs);
+
+ BufferOffset as_muls(FloatRegister fd, FloatRegister fs, FloatRegister ft);
+ BufferOffset as_muld(FloatRegister fd, FloatRegister fs, FloatRegister ft);
+ BufferOffset as_divs(FloatRegister fd, FloatRegister fs, FloatRegister ft);
+ BufferOffset as_divd(FloatRegister fd, FloatRegister fs, FloatRegister ft);
+ BufferOffset as_sqrts(FloatRegister fd, FloatRegister fs);
+ BufferOffset as_sqrtd(FloatRegister fd, FloatRegister fs);
+
+ // FP compare instructions
+ BufferOffset as_cf(FloatFormat fmt, FloatRegister fs, FloatRegister ft,
+ FPConditionBit fcc = FCC0);
+ BufferOffset as_cun(FloatFormat fmt, FloatRegister fs, FloatRegister ft,
+ FPConditionBit fcc = FCC0);
+ BufferOffset as_ceq(FloatFormat fmt, FloatRegister fs, FloatRegister ft,
+ FPConditionBit fcc = FCC0);
+ BufferOffset as_cueq(FloatFormat fmt, FloatRegister fs, FloatRegister ft,
+ FPConditionBit fcc = FCC0);
+ BufferOffset as_colt(FloatFormat fmt, FloatRegister fs, FloatRegister ft,
+ FPConditionBit fcc = FCC0);
+ BufferOffset as_cult(FloatFormat fmt, FloatRegister fs, FloatRegister ft,
+ FPConditionBit fcc = FCC0);
+ BufferOffset as_cole(FloatFormat fmt, FloatRegister fs, FloatRegister ft,
+ FPConditionBit fcc = FCC0);
+ BufferOffset as_cule(FloatFormat fmt, FloatRegister fs, FloatRegister ft,
+ FPConditionBit fcc = FCC0);
+
+ // label operations
+ void bind(Label *label, BufferOffset boff = BufferOffset());
+ void bind(RepatchLabel *label);
+ uint32_t currentOffset() {
+ return nextOffset().getOffset();
+ }
+ void retarget(Label *label, Label *target);
+ void Bind(uint8_t *rawCode, AbsoluteLabel *label, const void *address);
+
+ // See Bind
+ size_t labelOffsetToPatchOffset(size_t offset) {
+ return actualOffset(offset);
+ }
+
+ void call(Label *label);
+ void call(void *target);
+
+ void as_break(uint32_t code);
+
+ public:
+ static void TraceJumpRelocations(JSTracer *trc, JitCode *code, CompactBufferReader &reader);
+ static void TraceDataRelocations(JSTracer *trc, JitCode *code, CompactBufferReader &reader);
+
+ protected:
+ InstImm invertBranch(InstImm branch, BOffImm16 skipOffset);
+ void bind(InstImm *inst, uint32_t branch, uint32_t target);
+ void addPendingJump(BufferOffset src, ImmPtr target, Relocation::Kind kind) {
+ enoughMemory_ &= jumps_.append(RelativePatch(src, target.value, kind));
+ if (kind == Relocation::JITCODE)
+ writeRelocation(src);
+ }
+
+ void addLongJump(BufferOffset src) {
+ enoughMemory_ &= longJumps_.append(src.getOffset());
+ }
+
+ public:
+ size_t numLongJumps() const {
+ return longJumps_.length();
+ }
+ uint32_t longJump(size_t i) {
+ return longJumps_[i];
+ }
+
+ // Copy the assembly code to the given buffer, and perform any pending
+ // relocations relying on the target address.
+ void executableCopy(uint8_t *buffer);
+
+ void flushBuffer() {
+ }
+
+ static uint32_t patchWrite_NearCallSize();
+ static uint32_t nopSize() { return 4; }
+
+ static uint32_t extractLuiOriValue(Instruction *inst0, Instruction *inst1);
+ static void updateLuiOriValue(Instruction *inst0, Instruction *inst1, uint32_t value);
+ static void writeLuiOriInstructions(Instruction *inst, Instruction *inst1,
+ Register reg, uint32_t value);
+
+ static void patchWrite_NearCall(CodeLocationLabel start, CodeLocationLabel toCall);
+ static void patchDataWithValueCheck(CodeLocationLabel label, PatchedImmPtr newValue,
+ PatchedImmPtr expectedValue);
+ static void patchDataWithValueCheck(CodeLocationLabel label, ImmPtr newValue,
+ ImmPtr expectedValue);
+ static void patchWrite_Imm32(CodeLocationLabel label, Imm32 imm);
+ static uint32_t alignDoubleArg(uint32_t offset) {
+ return (offset + 1U) &~ 1U;
+ }
+
+ static uint8_t *nextInstruction(uint8_t *instruction, uint32_t *count = nullptr);
+
+ static void ToggleToJmp(CodeLocationLabel inst_);
+ static void ToggleToCmp(CodeLocationLabel inst_);
+
+ static void ToggleCall(CodeLocationLabel inst_, bool enabled);
+
+ static void updateBoundsCheck(uint32_t logHeapSize, Instruction *inst);
+ void processCodeLabels(uint8_t *rawCode);
+
+ bool bailed() {
+ return m_buffer.bail();
+ }
+}; // Assembler
+
+// An Instruction is a structure for both encoding and decoding any and all
+// MIPS instructions.
+class Instruction
+{
+ protected:
+ // sll zero, zero, 0
+ static const uint32_t NopInst = 0x00000000;
+
+ uint32_t data;
+
+ // Standard constructor
+ Instruction (uint32_t data_) : data(data_) { }
+
+ // You should never create an instruction directly. You should create a
+ // more specific instruction which will eventually call one of these
+ // constructors for you.
+ public:
+ uint32_t encode() const {
+ return data;
+ }
+
+ void makeNop() {
+ data = NopInst;
+ }
+
+ void setData(uint32_t data) {
+ this->data = data;
+ }
+
+ const Instruction & operator=(const Instruction &src) {
+ data = src.data;
+ return *this;
+ }
+
+ // Extract the one particular bit.
+ uint32_t extractBit(uint32_t bit) {
+ return (encode() >> bit) & 1;
+ }
+ // Extract a bit field out of the instruction
+ uint32_t extractBitField(uint32_t hi, uint32_t lo) {
+ return (encode() >> lo) & ((2 << (hi - lo)) - 1);
+ }
+ // Since all MIPS instructions have opcode, the opcode
+ // extractor resides in the base class.
+ uint32_t extractOpcode() {
+ return extractBitField(OpcodeShift + OpcodeBits - 1, OpcodeShift);
+ }
+ // Return the fields at their original place in the instruction encoding.
+ Opcode OpcodeFieldRaw() const {
+ return static_cast<Opcode>(encode() & OpcodeMask);
+ }
+
+ // Get the next instruction in the instruction stream.
+    // This does neat things like ignore constant pools and their guards.
+ Instruction *next();
+
+    // Sometimes, an API wants a uint32_t (or a pointer to it) rather than
+ // an instruction. raw() just coerces this into a pointer to a uint32_t
+ const uint32_t *raw() const { return &data; }
+ uint32_t size() const { return 4; }
+}; // Instruction
+
+// make sure that it is the right size
+static_assert(sizeof(Instruction) == 4, "Size of Instruction class has to be 4 bytes.");
+
+class InstNOP : public Instruction
+{
+ public:
+ InstNOP()
+ : Instruction(NopInst)
+ { }
+
+};
+
+// Class for register type instructions.
+class InstReg : public Instruction
+{
+ public:
+ InstReg(Opcode op, Register rd, FunctionField ff)
+ : Instruction(op | RD(rd) | ff)
+ { }
+ InstReg(Opcode op, Register rs, Register rt, FunctionField ff)
+ : Instruction(op | RS(rs) | RT(rt) | ff)
+ { }
+ InstReg(Opcode op, Register rs, Register rt, Register rd, FunctionField ff)
+ : Instruction(op | RS(rs) | RT(rt) | RD(rd) | ff)
+ { }
+ InstReg(Opcode op, Register rs, Register rt, Register rd, uint32_t sa, FunctionField ff)
+ : Instruction(op | RS(rs) | RT(rt) | RD(rd) | SA(sa) | ff)
+ { }
+ InstReg(Opcode op, RSField rs, Register rt, Register rd, uint32_t sa, FunctionField ff)
+ : Instruction(op | rs | RT(rt) | RD(rd) | SA(sa) | ff)
+ { }
+ InstReg(Opcode op, Register rs, RTField rt, Register rd, uint32_t sa, FunctionField ff)
+ : Instruction(op | RS(rs) | rt | RD(rd) | SA(sa) | ff)
+ { }
+ InstReg(Opcode op, Register rs, uint32_t cc, Register rd, uint32_t sa, FunctionField ff)
+ : Instruction(op | RS(rs) | cc | RD(rd) | SA(sa) | ff)
+ { }
+ InstReg(Opcode op, uint32_t code, FunctionField ff)
+ : Instruction(op | code | ff)
+ { }
+    // For floating-point instructions.
+ InstReg(Opcode op, RSField rs, Register rt, FloatRegister rd)
+ : Instruction(op | rs | RT(rt) | RD(rd))
+ { }
+ InstReg(Opcode op, RSField rs, Register rt, FloatRegister rd, uint32_t sa, FunctionField ff)
+ : Instruction(op | rs | RT(rt) | RD(rd) | SA(sa) | ff)
+ { }
+ InstReg(Opcode op, RSField rs, Register rt, FloatRegister fs, FloatRegister fd, FunctionField ff)
+ : Instruction(op | rs | RT(rt) | RD(fs) | SA(fd) | ff)
+ { }
+ InstReg(Opcode op, RSField rs, FloatRegister ft, FloatRegister fs, FloatRegister fd, FunctionField ff)
+ : Instruction(op | rs | RT(ft) | RD(fs) | SA(fd) | ff)
+ { }
+ InstReg(Opcode op, RSField rs, FloatRegister ft, FloatRegister fd, uint32_t sa, FunctionField ff)
+ : Instruction(op | rs | RT(ft) | RD(fd) | SA(sa) | ff)
+ { }
+
+ uint32_t extractRS () {
+ return extractBitField(RSShift + RSBits - 1, RSShift);
+ }
+ uint32_t extractRT () {
+ return extractBitField(RTShift + RTBits - 1, RTShift);
+ }
+ uint32_t extractRD () {
+ return extractBitField(RDShift + RDBits - 1, RDShift);
+ }
+ uint32_t extractSA () {
+ return extractBitField(SAShift + SABits - 1, SAShift);
+ }
+ uint32_t extractFunctionField () {
+ return extractBitField(FunctionShift + FunctionBits - 1, FunctionShift);
+ }
+};
+
+// Class for branch, load and store instructions with immediate offset.
+class InstImm : public Instruction
+{
+ public:
+ void extractImm16(BOffImm16 *dest);
+
+ InstImm(Opcode op, Register rs, Register rt, BOffImm16 off)
+ : Instruction(op | RS(rs) | RT(rt) | off.encode())
+ { }
+ InstImm(Opcode op, Register rs, RTField rt, BOffImm16 off)
+ : Instruction(op | RS(rs) | rt | off.encode())
+ { }
+ InstImm(Opcode op, RSField rs, uint32_t cc, BOffImm16 off)
+ : Instruction(op | rs | cc | off.encode())
+ { }
+ InstImm(Opcode op, Register rs, Register rt, Imm16 off)
+ : Instruction(op | RS(rs) | RT(rt) | off.encode())
+ { }
+ InstImm(uint32_t raw)
+ : Instruction(raw)
+ { }
+ // For floating-point loads and stores.
+ InstImm(Opcode op, Register rs, FloatRegister rt, Imm16 off)
+ : Instruction(op | RS(rs) | RT(rt) | off.encode())
+ { }
+
+ uint32_t extractOpcode() {
+ return extractBitField(OpcodeShift + OpcodeBits - 1, OpcodeShift);
+ }
+ void setOpcode(Opcode op) {
+ data = (data & ~OpcodeMask) | op;
+ }
+ uint32_t extractRS() {
+ return extractBitField(RSShift + RSBits - 1, RSShift);
+ }
+ uint32_t extractRT() {
+ return extractBitField(RTShift + RTBits - 1, RTShift);
+ }
+ void setRT(RTField rt) {
+ data = (data & ~RTMask) | rt;
+ }
+ uint32_t extractImm16Value() {
+ return extractBitField(Imm16Shift + Imm16Bits - 1, Imm16Shift);
+ }
+ void setBOffImm16(BOffImm16 off) {
+ // Reset immediate field and replace it
+ data = (data & ~Imm16Mask) | off.encode();
+ }
+ void setImm16(Imm16 off) {
+ // Reset immediate field and replace it
+ data = (data & ~Imm16Mask) | off.encode();
+ }
+};
+
+// Class for Jump type instructions.
+class InstJump : public Instruction
+{
+ public:
+ InstJump(Opcode op, JOffImm26 off)
+ : Instruction(op | off.encode())
+ { }
+
+ uint32_t extractImm26Value() {
+ return extractBitField(Imm26Shift + Imm26Bits - 1, Imm26Shift);
+ }
+};
+
+static const uint32_t NumIntArgRegs = 4;
+
+static inline bool
+GetIntArgReg(uint32_t usedArgSlots, Register *out)
+{
+ if (usedArgSlots < NumIntArgRegs) {
+ *out = Register::FromCode(a0.code() + usedArgSlots);
+ return true;
+ }
+ return false;
+}
+
+// Get a register in which we plan to put a quantity that will be used as an
+// integer argument. This differs from GetIntArgReg in that if we have no more
+// actual argument registers to use we will fall back on using whatever
+// CallTempReg* don't overlap the argument registers, and only fail once those
+// run out too.
+static inline bool
+GetTempRegForIntArg(uint32_t usedIntArgs, uint32_t usedFloatArgs, Register *out)
+{
+ // NOTE: We can't properly determine which regs are used if there are
+ // float arguments. If this is needed, we will have to guess.
+ JS_ASSERT(usedFloatArgs == 0);
+
+ if (GetIntArgReg(usedIntArgs, out))
+ return true;
+ // Unfortunately, we have to assume things about the point at which
+ // GetIntArgReg returns false, because we need to know how many registers it
+ // can allocate.
+ usedIntArgs -= NumIntArgRegs;
+ if (usedIntArgs >= NumCallTempNonArgRegs)
+ return false;
+ *out = CallTempNonArgRegs[usedIntArgs];
+ return true;
+}
+
+static inline uint32_t
+GetArgStackDisp(uint32_t usedArgSlots)
+{
+ JS_ASSERT(usedArgSlots >= NumIntArgRegs);
+ // Even register arguments have place reserved on stack.
+ return usedArgSlots * sizeof(intptr_t);
+}
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_mips_Assembler_mips_h */
diff --git a/src/third_party/mozjs/js/src/jit/mips/Bailouts-mips.cpp b/src/third_party/mozjs/js/src/jit/mips/Bailouts-mips.cpp
new file mode 100644
index 0000000..4cde9d7
--- /dev/null
+++ b/src/third_party/mozjs/js/src/jit/mips/Bailouts-mips.cpp
@@ -0,0 +1,63 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/mips/Bailouts-mips.h"
+
+#include "jscntxt.h"
+#include "jscompartment.h"
+
+using namespace js;
+using namespace js::jit;
+
+IonBailoutIterator::IonBailoutIterator(const JitActivationIterator &activations,
+ BailoutStack *bailout)
+ : IonFrameIterator(activations),
+ machine_(bailout->machine())
+{
+ uint8_t *sp = bailout->parentStackPointer();
+ uint8_t *fp = sp + bailout->frameSize();
+
+ current_ = fp;
+ type_ = JitFrame_IonJS;
+ topFrameSize_ = current_ - sp;
+ topIonScript_ = script()->ionScript();
+
+ if (bailout->frameClass() == FrameSizeClass::None()) {
+ snapshotOffset_ = bailout->snapshotOffset();
+ return;
+ }
+
+ // Compute the snapshot offset from the bailout ID.
+ JitActivation *activation = activations.activation()->asJit();
+ JSRuntime *rt = activation->compartment()->runtimeFromMainThread();
+ JitCode *code = rt->jitRuntime()->getBailoutTable(bailout->frameClass());
+ uintptr_t tableOffset = bailout->tableOffset();
+ uintptr_t tableStart = reinterpret_cast<uintptr_t>(code->raw());
+
+ MOZ_ASSERT(tableOffset >= tableStart &&
+ tableOffset < tableStart + code->instructionsSize());
+ MOZ_ASSERT((tableOffset - tableStart) % BAILOUT_TABLE_ENTRY_SIZE == 0);
+
+ uint32_t bailoutId = ((tableOffset - tableStart) / BAILOUT_TABLE_ENTRY_SIZE) - 1;
+ MOZ_ASSERT(bailoutId < BAILOUT_TABLE_SIZE);
+
+ snapshotOffset_ = topIonScript_->bailoutToSnapshot(bailoutId);
+}
+
+IonBailoutIterator::IonBailoutIterator(const JitActivationIterator &activations,
+ InvalidationBailoutStack *bailout)
+ : IonFrameIterator(activations),
+ machine_(bailout->machine())
+{
+ returnAddressToFp_ = bailout->osiPointReturnAddress();
+ topIonScript_ = bailout->ionScript();
+ const OsiIndex *osiIndex = topIonScript_->getOsiIndex(returnAddressToFp_);
+
+ current_ = (uint8_t*) bailout->fp();
+ type_ = JitFrame_IonJS;
+ topFrameSize_ = current_ - bailout->sp();
+ snapshotOffset_ = osiIndex->snapshotOffset();
+}
diff --git a/src/third_party/mozjs/js/src/jit/mips/Bailouts-mips.h b/src/third_party/mozjs/js/src/jit/mips/Bailouts-mips.h
new file mode 100644
index 0000000..dd14d1e
--- /dev/null
+++ b/src/third_party/mozjs/js/src/jit/mips/Bailouts-mips.h
@@ -0,0 +1,77 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_mips_Bailouts_mips_h
+#define jit_mips_Bailouts_mips_h
+
+#include "jit/Bailouts.h"
+#include "jit/JitCompartment.h"
+
+namespace js {
+namespace jit {
+
+class BailoutStack
+{
+ uintptr_t frameClassId_;
+ // This is pushed in the bailout handler. Both entry points into the
+    // handler insert their own value into lr, which is then placed onto the
+ // stack along with frameClassId_ above. This should be migrated to ip.
+ public:
+ union {
+ uintptr_t frameSize_;
+ uintptr_t tableOffset_;
+ };
+
+ protected:
+ mozilla::Array<double, FloatRegisters::Total> fpregs_;
+ mozilla::Array<uintptr_t, Registers::Total> regs_;
+
+ uintptr_t snapshotOffset_;
+ uintptr_t padding_;
+
+ public:
+ FrameSizeClass frameClass() const {
+ return FrameSizeClass::FromClass(frameClassId_);
+ }
+ uintptr_t tableOffset() const {
+ MOZ_ASSERT(frameClass() != FrameSizeClass::None());
+ return tableOffset_;
+ }
+ uint32_t frameSize() const {
+ if (frameClass() == FrameSizeClass::None())
+ return frameSize_;
+ return frameClass().frameSize();
+ }
+ MachineState machine() {
+ return MachineState::FromBailout(regs_, fpregs_);
+ }
+ SnapshotOffset snapshotOffset() const {
+ MOZ_ASSERT(frameClass() == FrameSizeClass::None());
+ return snapshotOffset_;
+ }
+ uint8_t *parentStackPointer() const {
+ if (frameClass() == FrameSizeClass::None())
+ return (uint8_t *)this + sizeof(BailoutStack);
+ return (uint8_t *)this + offsetof(BailoutStack, snapshotOffset_);
+ }
+ static size_t offsetOfFrameClass() {
+ return offsetof(BailoutStack, frameClassId_);
+ }
+ static size_t offsetOfFrameSize() {
+ return offsetof(BailoutStack, frameSize_);
+ }
+ static size_t offsetOfFpRegs() {
+ return offsetof(BailoutStack, fpregs_);
+ }
+ static size_t offsetOfRegs() {
+ return offsetof(BailoutStack, regs_);
+ }
+};
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_mips_Bailouts_mips_h */
diff --git a/src/third_party/mozjs/js/src/jit/mips/BaselineCompiler-mips.cpp b/src/third_party/mozjs/js/src/jit/mips/BaselineCompiler-mips.cpp
new file mode 100644
index 0000000..69c935b
--- /dev/null
+++ b/src/third_party/mozjs/js/src/jit/mips/BaselineCompiler-mips.cpp
@@ -0,0 +1,16 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/mips/BaselineCompiler-mips.h"
+
+using namespace js;
+using namespace js::jit;
+
+BaselineCompilerMIPS::BaselineCompilerMIPS(JSContext *cx, TempAllocator &alloc,
+ HandleScript script)
+ : BaselineCompilerShared(cx, alloc, script)
+{
+}
diff --git a/src/third_party/mozjs/js/src/jit/mips/BaselineCompiler-mips.h b/src/third_party/mozjs/js/src/jit/mips/BaselineCompiler-mips.h
new file mode 100644
index 0000000..7db49b3
--- /dev/null
+++ b/src/third_party/mozjs/js/src/jit/mips/BaselineCompiler-mips.h
@@ -0,0 +1,26 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_mips_BaselineCompiler_mips_h
+#define jit_mips_BaselineCompiler_mips_h
+
+#include "jit/shared/BaselineCompiler-shared.h"
+
+namespace js {
+namespace jit {
+
+class BaselineCompilerMIPS : public BaselineCompilerShared
+{
+ protected:
+ BaselineCompilerMIPS(JSContext *cx, TempAllocator &alloc, HandleScript script);
+};
+
+typedef BaselineCompilerMIPS BaselineCompilerSpecific;
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_mips_BaselineCompiler_mips_h */
diff --git a/src/third_party/mozjs/js/src/jit/mips/BaselineHelpers-mips.h b/src/third_party/mozjs/js/src/jit/mips/BaselineHelpers-mips.h
new file mode 100644
index 0000000..565e8df
--- /dev/null
+++ b/src/third_party/mozjs/js/src/jit/mips/BaselineHelpers-mips.h
@@ -0,0 +1,333 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_mips_BaselineHelpers_mips_h
+#define jit_mips_BaselineHelpers_mips_h
+
+#ifdef JS_ION
+#include "jit/BaselineFrame.h"
+#include "jit/BaselineIC.h"
+#include "jit/BaselineRegisters.h"
+#include "jit/IonMacroAssembler.h"
+
+namespace js {
+namespace jit {
+
+// Distance from sp to the top Value inside an IC stub (no return address on
+// the stack on MIPS).
+static const size_t ICStackValueOffset = 0;
+
+inline void
+EmitRestoreTailCallReg(MacroAssembler &masm)
+{
+ // No-op on MIPS because ra register is always holding the return address.
+}
+
+inline void
+EmitRepushTailCallReg(MacroAssembler &masm)
+{
+ // No-op on MIPS because ra register is always holding the return address.
+}
+
+inline void
+EmitCallIC(CodeOffsetLabel *patchOffset, MacroAssembler &masm)
+{
+ // Move ICEntry offset into BaselineStubReg.
+ CodeOffsetLabel offset = masm.movWithPatch(ImmWord(-1), BaselineStubReg);
+ *patchOffset = offset;
+
+ // Load stub pointer into BaselineStubReg.
+ masm.loadPtr(Address(BaselineStubReg, ICEntry::offsetOfFirstStub()), BaselineStubReg);
+
+ // Load stubcode pointer from BaselineStubEntry.
+ // R2 won't be active when we call ICs, so we can use it as scratch.
+ masm.loadPtr(Address(BaselineStubReg, ICStub::offsetOfStubCode()), R2.scratchReg());
+
+ // Call the stubcode via a direct jump-and-link
+ masm.call(R2.scratchReg());
+}
+
+inline void
+EmitEnterTypeMonitorIC(MacroAssembler &masm,
+ size_t monitorStubOffset = ICMonitoredStub::offsetOfFirstMonitorStub())
+{
+ // This is expected to be called from within an IC, when BaselineStubReg
+ // is properly initialized to point to the stub.
+ masm.loadPtr(Address(BaselineStubReg, (uint32_t) monitorStubOffset), BaselineStubReg);
+
+ // Load stubcode pointer from BaselineStubEntry.
+ // R2 won't be active when we call ICs, so we can use it.
+ masm.loadPtr(Address(BaselineStubReg, ICStub::offsetOfStubCode()), R2.scratchReg());
+
+ // Jump to the stubcode.
+ masm.branch(R2.scratchReg());
+}
+
+inline void
+EmitReturnFromIC(MacroAssembler &masm)
+{
+ masm.branch(ra);
+}
+
+inline void
+EmitChangeICReturnAddress(MacroAssembler &masm, Register reg)
+{
+ masm.movePtr(reg, ra);
+}
+
+inline void
+EmitTailCallVM(JitCode *target, MacroAssembler &masm, uint32_t argSize)
+{
+ // We assume during this that R0 and R1 have been pushed, and that R2 is
+ // unused.
+ MOZ_ASSERT(R2 == ValueOperand(t7, t6));
+
+ // Compute frame size.
+ masm.movePtr(BaselineFrameReg, t6);
+ masm.addPtr(Imm32(BaselineFrame::FramePointerOffset), t6);
+ masm.subPtr(BaselineStackReg, t6);
+
+ // Store frame size without VMFunction arguments for GC marking.
+ masm.ma_subu(t7, t6, Imm32(argSize));
+ masm.storePtr(t7, Address(BaselineFrameReg, BaselineFrame::reverseOffsetOfFrameSize()));
+
+ // Push frame descriptor and perform the tail call.
+ // BaselineTailCallReg (ra) already contains the return address (as we
+ // keep it there through the stub calls), but the VMWrapper code being
+ // called expects the return address to also be pushed on the stack.
+ MOZ_ASSERT(BaselineTailCallReg == ra);
+ masm.makeFrameDescriptor(t6, JitFrame_BaselineJS);
+ masm.subPtr(Imm32(sizeof(IonCommonFrameLayout)), StackPointer);
+ masm.storePtr(t6, Address(StackPointer, IonCommonFrameLayout::offsetOfDescriptor()));
+ masm.storePtr(ra, Address(StackPointer, IonCommonFrameLayout::offsetOfReturnAddress()));
+
+ masm.branch(target);
+}
+
+inline void
+EmitCreateStubFrameDescriptor(MacroAssembler &masm, Register reg)
+{
+ // Compute stub frame size. We have to add two pointers: the stub reg and
+ // previous frame pointer pushed by EmitEnterStubFrame.
+ masm.movePtr(BaselineFrameReg, reg);
+ masm.addPtr(Imm32(sizeof(intptr_t) * 2), reg);
+ masm.subPtr(BaselineStackReg, reg);
+
+ masm.makeFrameDescriptor(reg, JitFrame_BaselineStub);
+}
+
+inline void
+EmitCallVM(JitCode *target, MacroAssembler &masm)
+{
+ EmitCreateStubFrameDescriptor(masm, t6);
+ masm.push(t6);
+ masm.call(target);
+}
+
+struct BaselineStubFrame {
+ uintptr_t savedFrame;
+ uintptr_t savedStub;
+ uintptr_t returnAddress;
+ uintptr_t descriptor;
+};
+
+static const uint32_t STUB_FRAME_SIZE = sizeof(BaselineStubFrame);
+static const uint32_t STUB_FRAME_SAVED_STUB_OFFSET = offsetof(BaselineStubFrame, savedStub);
+
+inline void
+EmitEnterStubFrame(MacroAssembler &masm, Register scratch)
+{
+ MOZ_ASSERT(scratch != BaselineTailCallReg);
+
+ // Compute frame size.
+ masm.movePtr(BaselineFrameReg, scratch);
+ masm.addPtr(Imm32(BaselineFrame::FramePointerOffset), scratch);
+ masm.subPtr(BaselineStackReg, scratch);
+
+ masm.storePtr(scratch, Address(BaselineFrameReg, BaselineFrame::reverseOffsetOfFrameSize()));
+
+ // Note: when making changes here, don't forget to update
+ // BaselineStubFrame if needed.
+
+ // Push frame descriptor and return address.
+ masm.makeFrameDescriptor(scratch, JitFrame_BaselineJS);
+ masm.subPtr(Imm32(STUB_FRAME_SIZE), StackPointer);
+ masm.storePtr(scratch, Address(StackPointer, offsetof(BaselineStubFrame, descriptor)));
+ masm.storePtr(BaselineTailCallReg, Address(StackPointer,
+ offsetof(BaselineStubFrame, returnAddress)));
+
+ // Save old frame pointer, stack pointer and stub reg.
+ masm.storePtr(BaselineStubReg, Address(StackPointer,
+ offsetof(BaselineStubFrame, savedStub)));
+ masm.storePtr(BaselineFrameReg, Address(StackPointer,
+ offsetof(BaselineStubFrame, savedFrame)));
+ masm.movePtr(BaselineStackReg, BaselineFrameReg);
+
+ // We pushed 4 words, so the stack is still aligned to 8 bytes.
+ masm.checkStackAlignment();
+}
+
+inline void
+EmitLeaveStubFrame(MacroAssembler &masm, bool calledIntoIon = false)
+{
+ // Ion frames do not save and restore the frame pointer. If we called
+ // into Ion, we have to restore the stack pointer from the frame descriptor.
+ // If we performed a VM call, the descriptor has been popped already so
+ // in that case we use the frame pointer.
+ if (calledIntoIon) {
+ masm.pop(ScratchRegister);
+ masm.rshiftPtr(Imm32(FRAMESIZE_SHIFT), ScratchRegister);
+ masm.addPtr(ScratchRegister, BaselineStackReg);
+ } else {
+ masm.movePtr(BaselineFrameReg, BaselineStackReg);
+ }
+
+ masm.loadPtr(Address(StackPointer, offsetof(BaselineStubFrame, savedFrame)),
+ BaselineFrameReg);
+ masm.loadPtr(Address(StackPointer, offsetof(BaselineStubFrame, savedStub)),
+ BaselineStubReg);
+
+ // Load the return address.
+ masm.loadPtr(Address(StackPointer, offsetof(BaselineStubFrame, returnAddress)),
+ BaselineTailCallReg);
+
+ // Discard the frame descriptor.
+ masm.loadPtr(Address(StackPointer, offsetof(BaselineStubFrame, descriptor)), ScratchRegister);
+ masm.addPtr(Imm32(STUB_FRAME_SIZE), StackPointer);
+}
+
+inline void
+EmitStowICValues(MacroAssembler &masm, int values)
+{
+ MOZ_ASSERT(values >= 0 && values <= 2);
+ switch(values) {
+ case 1:
+ // Stow R0
+ masm.pushValue(R0);
+ break;
+ case 2:
+ // Stow R0 and R1
+ masm.pushValue(R0);
+ masm.pushValue(R1);
+ break;
+ }
+}
+
+inline void
+EmitUnstowICValues(MacroAssembler &masm, int values, bool discard = false)
+{
+ MOZ_ASSERT(values >= 0 && values <= 2);
+ switch(values) {
+ case 1:
+ // Unstow R0.
+ if (discard)
+ masm.addPtr(Imm32(sizeof(Value)), BaselineStackReg);
+ else
+ masm.popValue(R0);
+ break;
+ case 2:
+ // Unstow R0 and R1.
+ if (discard) {
+ masm.addPtr(Imm32(sizeof(Value) * 2), BaselineStackReg);
+ } else {
+ masm.popValue(R1);
+ masm.popValue(R0);
+ }
+ break;
+ }
+}
+
+inline void
+EmitCallTypeUpdateIC(MacroAssembler &masm, JitCode *code, uint32_t objectOffset)
+{
+ // R0 contains the value that needs to be typechecked.
+ // The object we're updating is a boxed Value on the stack, at offset
+ // objectOffset from $sp, excluding the return address.
+
+ // Save the current BaselineStubReg to stack, as well as the TailCallReg,
+ // since on mips, the $ra is live.
+ masm.subPtr(Imm32(2 * sizeof(intptr_t)), StackPointer);
+ masm.storePtr(BaselineStubReg, Address(StackPointer, sizeof(intptr_t)));
+ masm.storePtr(BaselineTailCallReg, Address(StackPointer, 0));
+
+ // This is expected to be called from within an IC, when BaselineStubReg
+ // is properly initialized to point to the stub.
+ masm.loadPtr(Address(BaselineStubReg, ICUpdatedStub::offsetOfFirstUpdateStub()),
+ BaselineStubReg);
+
+ // Load stubcode pointer from BaselineStubReg into BaselineTailCallReg.
+ masm.loadPtr(Address(BaselineStubReg, ICStub::offsetOfStubCode()), R2.scratchReg());
+
+ // Call the stubcode.
+ masm.call(R2.scratchReg());
+
+ // Restore the old stub reg and tailcall reg.
+ masm.loadPtr(Address(StackPointer, 0), BaselineTailCallReg);
+ masm.loadPtr(Address(StackPointer, sizeof(intptr_t)), BaselineStubReg);
+ masm.addPtr(Imm32(2 * sizeof(intptr_t)), StackPointer);
+
+ // The update IC will store 0 or 1 in R1.scratchReg() reflecting if the
+ // value in R0 type-checked properly or not.
+ Label success;
+ masm.ma_b(R1.scratchReg(), Imm32(1), &success, Assembler::Equal, ShortJump);
+
+ // If the IC failed, then call the update fallback function.
+ EmitEnterStubFrame(masm, R1.scratchReg());
+
+ masm.loadValue(Address(BaselineStackReg, STUB_FRAME_SIZE + objectOffset), R1);
+
+ masm.pushValue(R0);
+ masm.pushValue(R1);
+ masm.push(BaselineStubReg);
+
+ // Load previous frame pointer, push BaselineFrame *.
+ masm.loadPtr(Address(BaselineFrameReg, 0), R0.scratchReg());
+ masm.pushBaselineFramePtr(R0.scratchReg(), R0.scratchReg());
+
+ EmitCallVM(code, masm);
+ EmitLeaveStubFrame(masm);
+
+ // Success at end.
+ masm.bind(&success);
+}
+
+template <typename AddrType>
+inline void
+EmitPreBarrier(MacroAssembler &masm, const AddrType &addr, MIRType type)
+{
+ // On MIPS, $ra is clobbered by patchableCallPreBarrier. Save it first.
+ masm.push(ra);
+ masm.patchableCallPreBarrier(addr, type);
+ masm.pop(ra);
+}
+
+inline void
+EmitStubGuardFailure(MacroAssembler &masm)
+{
+ // NOTE: This routine assumes that the stub guard code left the stack in
+ // the same state it was in when it was entered.
+
+ // BaselineStubEntry points to the current stub.
+
+ // Load next stub into BaselineStubReg
+ masm.loadPtr(Address(BaselineStubReg, ICStub::offsetOfNext()), BaselineStubReg);
+
+ // Load stubcode pointer from BaselineStubEntry into scratch register.
+ masm.loadPtr(Address(BaselineStubReg, ICStub::offsetOfStubCode()), R2.scratchReg());
+
+ // Return address is already loaded, just jump to the next stubcode.
+ MOZ_ASSERT(BaselineTailCallReg == ra);
+ masm.branch(R2.scratchReg());
+}
+
+
+} // namespace jit
+} // namespace js
+
+#endif // JS_ION
+
+#endif /* jit_mips_BaselineHelpers_mips_h */
+
diff --git a/src/third_party/mozjs/js/src/jit/mips/BaselineIC-mips.cpp b/src/third_party/mozjs/js/src/jit/mips/BaselineIC-mips.cpp
new file mode 100644
index 0000000..777f5a6
--- /dev/null
+++ b/src/third_party/mozjs/js/src/jit/mips/BaselineIC-mips.cpp
@@ -0,0 +1,223 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jsiter.h"
+
+#include "jit/BaselineCompiler.h"
+#include "jit/BaselineHelpers.h"
+#include "jit/BaselineIC.h"
+#include "jit/BaselineJIT.h"
+#include "jit/IonLinker.h"
+
+#include "jsboolinlines.h"
+
+using namespace js;
+using namespace js::jit;
+
+namespace js {
+namespace jit {
+
+// ICCompare_Int32
+
+bool
+ICCompare_Int32::Compiler::generateStubCode(MacroAssembler &masm)
+{
+ // Guard that R0 is an integer and R1 is an integer.
+ Label failure;
+ Label conditionTrue;
+ masm.branchTestInt32(Assembler::NotEqual, R0, &failure);
+ masm.branchTestInt32(Assembler::NotEqual, R1, &failure);
+
+ // Compare payload regs of R0 and R1.
+ Assembler::Condition cond = JSOpToCondition(op, /* signed = */true);
+ masm.ma_cmp_set(R0.payloadReg(), R0.payloadReg(), R1.payloadReg(), cond);
+
+ masm.tagValue(JSVAL_TYPE_BOOLEAN, R0.payloadReg(), R0);
+ EmitReturnFromIC(masm);
+
+ // Failure case - jump to next stub
+ masm.bind(&failure);
+ EmitStubGuardFailure(masm);
+
+ return true;
+}
+
+bool
+ICCompare_Double::Compiler::generateStubCode(MacroAssembler &masm)
+{
+ Label failure, isNaN;
+ masm.ensureDouble(R0, FloatReg0, &failure);
+ masm.ensureDouble(R1, FloatReg1, &failure);
+
+ Register dest = R0.scratchReg();
+
+ Assembler::DoubleCondition doubleCond = JSOpToDoubleCondition(op);
+
+ masm.ma_cmp_set_double(dest, FloatReg0, FloatReg1, doubleCond);
+
+ masm.tagValue(JSVAL_TYPE_BOOLEAN, dest, R0);
+ EmitReturnFromIC(masm);
+
+ // Failure case - jump to next stub
+ masm.bind(&failure);
+ EmitStubGuardFailure(masm);
+ return true;
+}
+
+// ICBinaryArith_Int32
+
+bool
+ICBinaryArith_Int32::Compiler::generateStubCode(MacroAssembler &masm)
+{
+ // Guard that R0 is an integer and R1 is an integer.
+ Label failure;
+ masm.branchTestInt32(Assembler::NotEqual, R0, &failure);
+ masm.branchTestInt32(Assembler::NotEqual, R1, &failure);
+
+ // Add R0 and R1. Don't need to explicitly unbox, just use R2's payloadReg.
+ Register scratchReg = R2.payloadReg();
+
+ // DIV and MOD need an extra non-volatile ValueOperand to hold R0.
+ GeneralRegisterSet savedRegs = availableGeneralRegs(2);
+ savedRegs = GeneralRegisterSet::Intersect(GeneralRegisterSet::NonVolatile(), savedRegs);
+ ValueOperand savedValue = savedRegs.takeAnyValue();
+
+ Label goodMul, divTest1, divTest2;
+ switch(op_) {
+ case JSOP_ADD:
+ // We know R0.typeReg() already contains the integer tag. No boxing
+ // required.
+ masm.ma_addTestOverflow(R0.payloadReg(), R0.payloadReg(), R1.payloadReg(), &failure);
+ break;
+ case JSOP_SUB:
+ masm.ma_subTestOverflow(R0.payloadReg(), R0.payloadReg(), R1.payloadReg(), &failure);
+ break;
+ case JSOP_MUL: {
+ masm.ma_mul_branch_overflow(scratchReg, R0.payloadReg(), R1.payloadReg(), &failure);
+
+ masm.ma_b(scratchReg, Imm32(0), &goodMul, Assembler::NotEqual, ShortJump);
+
+ // Result is -0 if operands have different signs.
+ masm.as_xor(t8, R0.payloadReg(), R1.payloadReg());
+ masm.ma_b(t8, Imm32(0), &failure, Assembler::LessThan, ShortJump);
+
+ masm.bind(&goodMul);
+ masm.move32(scratchReg, R0.payloadReg());
+ break;
+ }
+ case JSOP_DIV:
+ case JSOP_MOD: {
+ // Check for INT_MIN / -1, it results in a double.
+ masm.ma_b(R0.payloadReg(), Imm32(INT_MIN), &divTest1, Assembler::NotEqual, ShortJump);
+ masm.ma_b(R1.payloadReg(), Imm32(-1), &failure, Assembler::Equal, ShortJump);
+ masm.bind(&divTest1);
+
+ // Check for division by zero
+ masm.ma_b(R1.payloadReg(), Imm32(0), &failure, Assembler::Equal, ShortJump);
+
+ // Check for 0 / X with X < 0 (results in -0).
+ masm.ma_b(R0.payloadReg(), Imm32(0), &divTest2, Assembler::NotEqual, ShortJump);
+ masm.ma_b(R1.payloadReg(), Imm32(0), &failure, Assembler::LessThan, ShortJump);
+ masm.bind(&divTest2);
+
+ masm.as_div(R0.payloadReg(), R1.payloadReg());
+
+ if (op_ == JSOP_DIV) {
+ // Result is a double if the remainder != 0.
+ masm.as_mfhi(scratchReg);
+ masm.ma_b(scratchReg, Imm32(0), &failure, Assembler::NotEqual, ShortJump);
+ masm.as_mflo(scratchReg);
+ masm.tagValue(JSVAL_TYPE_INT32, scratchReg, R0);
+ } else {
+ Label done;
+ // If X % Y == 0 and X < 0, the result is -0.
+ masm.as_mfhi(scratchReg);
+ masm.ma_b(scratchReg, Imm32(0), &done, Assembler::NotEqual, ShortJump);
+ masm.ma_b(R0.payloadReg(), Imm32(0), &failure, Assembler::LessThan, ShortJump);
+ masm.bind(&done);
+ masm.tagValue(JSVAL_TYPE_INT32, scratchReg, R0);
+ }
+ break;
+ }
+ case JSOP_BITOR:
+ masm.ma_or(R0.payloadReg() , R0.payloadReg(), R1.payloadReg());
+ break;
+ case JSOP_BITXOR:
+ masm.ma_xor(R0.payloadReg() , R0.payloadReg(), R1.payloadReg());
+ break;
+ case JSOP_BITAND:
+ masm.ma_and(R0.payloadReg() , R0.payloadReg(), R1.payloadReg());
+ break;
+ case JSOP_LSH:
+ // MIPS will only use 5 lowest bits in R1 as shift offset.
+ masm.ma_sll(R0.payloadReg(), R0.payloadReg(), R1.payloadReg());
+ break;
+ case JSOP_RSH:
+ masm.ma_sra(R0.payloadReg(), R0.payloadReg(), R1.payloadReg());
+ break;
+ case JSOP_URSH:
+ masm.ma_srl(scratchReg, R0.payloadReg(), R1.payloadReg());
+ if (allowDouble_) {
+ Label toUint;
+ masm.ma_b(scratchReg, Imm32(0), &toUint, Assembler::LessThan, ShortJump);
+
+ // Move result and box for return.
+ masm.move32(scratchReg, R0.payloadReg());
+ EmitReturnFromIC(masm);
+
+ masm.bind(&toUint);
+ masm.convertUInt32ToDouble(scratchReg, FloatReg1);
+ masm.boxDouble(FloatReg1, R0);
+ } else {
+ masm.ma_b(scratchReg, Imm32(0), &failure, Assembler::LessThan, ShortJump);
+ // Move result for return.
+ masm.move32(scratchReg, R0.payloadReg());
+ }
+ break;
+ default:
+ MOZ_ASSUME_UNREACHABLE("Unhandled op for BinaryArith_Int32.");
+ }
+
+ EmitReturnFromIC(masm);
+
+ // Failure case - jump to next stub
+ masm.bind(&failure);
+ EmitStubGuardFailure(masm);
+
+ return true;
+}
+
+bool
+ICUnaryArith_Int32::Compiler::generateStubCode(MacroAssembler &masm)
+{
+ Label failure;
+ masm.branchTestInt32(Assembler::NotEqual, R0, &failure);
+
+ switch (op) {
+ case JSOP_BITNOT:
+ masm.not32(R0.payloadReg());
+ break;
+ case JSOP_NEG:
+ // Guard against 0 and MIN_INT, both result in a double.
+ masm.branchTest32(Assembler::Zero, R0.payloadReg(), Imm32(INT32_MAX), &failure);
+
+ masm.neg32(R0.payloadReg());
+ break;
+ default:
+ MOZ_ASSUME_UNREACHABLE("Unexpected op");
+ return false;
+ }
+
+ EmitReturnFromIC(masm);
+
+ masm.bind(&failure);
+ EmitStubGuardFailure(masm);
+ return true;
+}
+
+
+} // namespace jit
+} // namespace js
diff --git a/src/third_party/mozjs/js/src/jit/mips/BaselineRegisters-mips.h b/src/third_party/mozjs/js/src/jit/mips/BaselineRegisters-mips.h
new file mode 100644
index 0000000..6ecdfea
--- /dev/null
+++ b/src/third_party/mozjs/js/src/jit/mips/BaselineRegisters-mips.h
@@ -0,0 +1,49 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_mips_BaselineRegisters_mips_h
+#define jit_mips_BaselineRegisters_mips_h
+
+#ifdef JS_ION
+
+#include "jit/IonMacroAssembler.h"
+
+namespace js {
+namespace jit {
+
+// Fixed register assignments for the MIPS (32-bit) Baseline compiler.
+// BaselineFrameReg points at the current baseline frame; the stack pointer
+// is used directly as the baseline stack register.
+static MOZ_CONSTEXPR_VAR Register BaselineFrameReg = s5;
+static MOZ_CONSTEXPR_VAR Register BaselineStackReg = sp;
+
+// Boxed Value operands, each a pair of GPRs. NOTE(review): presumably the
+// pair order is (type tag, payload) — confirm against ValueOperand's ctor.
+static MOZ_CONSTEXPR_VAR ValueOperand R0(v1, v0);
+static MOZ_CONSTEXPR_VAR ValueOperand R1(s7, s6);
+static MOZ_CONSTEXPR_VAR ValueOperand R2(t7, t6);
+
+// BaselineTailCallReg and BaselineStubReg
+// These use registers that are not preserved across calls.
+static MOZ_CONSTEXPR_VAR Register BaselineTailCallReg = ra;
+static MOZ_CONSTEXPR_VAR Register BaselineStubReg = t5;
+
+// No dedicated extract-temp registers on MIPS.
+static MOZ_CONSTEXPR_VAR Register ExtractTemp0 = InvalidReg;
+static MOZ_CONSTEXPR_VAR Register ExtractTemp1 = InvalidReg;
+
+// Register used internally by MacroAssemblerMIPS.
+static MOZ_CONSTEXPR_VAR Register BaselineSecondScratchReg = SecondScratchReg;
+
+// Note that BaselineTailCallReg is actually just the link register.
+// In MIPS code emission, we do not clobber BaselineTailCallReg since we keep
+// the return address for calls there.
+
+// FloatReg0 must be equal to ReturnFloatReg.
+static MOZ_CONSTEXPR_VAR FloatRegister FloatReg0 = f0;
+static MOZ_CONSTEXPR_VAR FloatRegister FloatReg1 = f2;
+
+} // namespace jit
+} // namespace js
+
+#endif // JS_ION
+
+#endif /* jit_mips_BaselineRegisters_mips_h */
+
diff --git a/src/third_party/mozjs/js/src/jit/mips/CodeGenerator-mips.cpp b/src/third_party/mozjs/js/src/jit/mips/CodeGenerator-mips.cpp
new file mode 100644
index 0000000..fd9be18
--- /dev/null
+++ b/src/third_party/mozjs/js/src/jit/mips/CodeGenerator-mips.cpp
@@ -0,0 +1,2343 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/mips/CodeGenerator-mips.h"
+
+#include "mozilla/MathAlgorithms.h"
+
+#include "jscntxt.h"
+#include "jscompartment.h"
+#include "jsnum.h"
+
+#include "jit/CodeGenerator.h"
+#include "jit/IonFrames.h"
+#include "jit/JitCompartment.h"
+#include "jit/MIR.h"
+#include "jit/MIRGraph.h"
+#include "vm/Shape.h"
+
+#include "jsscriptinlines.h"
+
+#include "jit/shared/CodeGenerator-shared-inl.h"
+
+using namespace js;
+using namespace js::jit;
+
+using mozilla::FloorLog2;
+using mozilla::NegativeInfinity;
+using JS::GenericNaN;
+
+// shared
+// Constructor: all state lives in the CodeGeneratorShared base.
+CodeGeneratorMIPS::CodeGeneratorMIPS(MIRGenerator *gen, LIRGraph *graph, MacroAssembler *masm)
+  : CodeGeneratorShared(gen, graph, masm)
+{
+}
+
+// Emit the function prologue. asm.js code saves the return address and
+// reserves frameDepth_; normal Ion code reserves frameSize() and verifies
+// stack alignment.
+bool
+CodeGeneratorMIPS::generatePrologue()
+{
+    if (gen->compilingAsmJS()) {
+        masm.Push(ra);
+        // Note that this automatically sets MacroAssembler::framePushed().
+        masm.reserveStack(frameDepth_);
+    } else {
+        // Note that this automatically sets MacroAssembler::framePushed().
+        masm.reserveStack(frameSize());
+        masm.checkStackAlignment();
+    }
+
+    return true;
+}
+
+// Emit the function epilogue: undo exactly what generatePrologue() did and
+// return. All return paths jump to returnLabel_, bound here.
+bool
+CodeGeneratorMIPS::generateEpilogue()
+{
+    masm.bind(&returnLabel_);
+#if JS_TRACE_LOGGING
+    masm.tracelogStop();
+#endif
+    if (gen->compilingAsmJS()) {
+        // Pop the stack we allocated at the start of the function.
+        masm.freeStack(frameDepth_);
+        masm.Pop(ra);
+        masm.abiret();
+        MOZ_ASSERT(masm.framePushed() == 0);
+    } else {
+        // Pop the stack we allocated at the start of the function.
+        masm.freeStack(frameSize());
+        MOZ_ASSERT(masm.framePushed() == 0);
+        masm.ret();
+    }
+    return true;
+}
+
+// Emit a floating-point compare-and-branch to `mir`'s block. If the target
+// is a loop backedge that needs an implicit check (labelForBackedgeWithImplicitCheck
+// returned an out-of-line entry), emit a patchable jump and record it in
+// patchableBackedges_ instead of a direct branch. `fmt` selects double vs
+// single-precision compare.
+void
+CodeGeneratorMIPS::branchToBlock(Assembler::FloatFormat fmt, FloatRegister lhs, FloatRegister rhs,
+                                 MBasicBlock *mir, Assembler::DoubleCondition cond)
+{
+    Label *label = mir->lir()->label();
+    if (Label *oolEntry = labelForBackedgeWithImplicitCheck(mir)) {
+        // Note: the backedge is initially a jump to the next instruction.
+        // It will be patched to the target block's label during link().
+        RepatchLabel rejoin;
+
+        CodeOffsetJump backedge;
+        Label skip;
+        // Branch over the patchable jump when the (inverted) condition holds.
+        if (fmt == Assembler::DoubleFloat)
+            masm.ma_bc1d(lhs, rhs, &skip, Assembler::InvertCondition(cond), ShortJump);
+        else
+            masm.ma_bc1s(lhs, rhs, &skip, Assembler::InvertCondition(cond), ShortJump);
+
+        backedge = masm.jumpWithPatch(&rejoin);
+        masm.bind(&rejoin);
+        masm.bind(&skip);
+
+        if (!patchableBackedges_.append(PatchableBackedgeInfo(backedge, label, oolEntry)))
+            MOZ_CRASH();
+    } else {
+        if (fmt == Assembler::DoubleFloat)
+            masm.branchDouble(cond, lhs, rhs, mir->lir()->label());
+        else
+            masm.branchFloat(cond, lhs, rhs, mir->lir()->label());
+    }
+}
+
+// Visitor double-dispatch hook: route this OOL bailout to the codegen.
+bool
+OutOfLineBailout::accept(CodeGeneratorMIPS *codegen)
+{
+    return codegen->visitOutOfLineBailout(this);
+}
+
+// Branch to ifTrue when the int32 operand is non-zero, else to ifFalse.
+bool
+CodeGeneratorMIPS::visitTestIAndBranch(LTestIAndBranch *test)
+{
+    const LAllocation *opd = test->getOperand(0);
+    MBasicBlock *ifTrue = test->ifTrue();
+    MBasicBlock *ifFalse = test->ifFalse();
+
+    emitBranch(ToRegister(opd), Imm32(0), Assembler::NonZero, ifTrue, ifFalse);
+    return true;
+}
+
+// Materialize the boolean result of a 32-bit comparison into the output
+// register. The right operand may be an immediate, a register, or a stack
+// slot (Address).
+bool
+CodeGeneratorMIPS::visitCompare(LCompare *comp)
+{
+    Assembler::Condition cond = JSOpToCondition(comp->mir()->compareType(), comp->jsop());
+    const LAllocation *left = comp->getOperand(0);
+    const LAllocation *right = comp->getOperand(1);
+    const LDefinition *def = comp->getDef(0);
+
+    if (right->isConstant())
+        masm.cmp32Set(cond, ToRegister(left), Imm32(ToInt32(right)), ToRegister(def));
+    else if (right->isGeneralReg())
+        masm.cmp32Set(cond, ToRegister(left), ToRegister(right), ToRegister(def));
+    else
+        masm.cmp32Set(cond, ToRegister(left), ToAddress(right), ToRegister(def));
+
+    return true;
+}
+
+// Fused 32-bit compare + conditional branch to the true/false successor
+// blocks. Mirrors visitCompare's operand-kind dispatch (imm/reg/stack).
+bool
+CodeGeneratorMIPS::visitCompareAndBranch(LCompareAndBranch *comp)
+{
+    Assembler::Condition cond = JSOpToCondition(comp->cmpMir()->compareType(), comp->jsop());
+    if (comp->right()->isConstant()) {
+        emitBranch(ToRegister(comp->left()), Imm32(ToInt32(comp->right())), cond,
+                   comp->ifTrue(), comp->ifFalse());
+    } else if (comp->right()->isGeneralReg()) {
+        emitBranch(ToRegister(comp->left()), ToRegister(comp->right()), cond,
+                   comp->ifTrue(), comp->ifFalse());
+    } else {
+        emitBranch(ToRegister(comp->left()), ToAddress(comp->right()), cond,
+                   comp->ifTrue(), comp->ifFalse());
+    }
+
+    return true;
+}
+
+// Emit out-of-line code, then the shared deoptimization tail: every
+// non-table bailout jumps to deoptLabel_, which stashes the frame size in
+// 'ra' and branches to the generic bailout handler.
+bool
+CodeGeneratorMIPS::generateOutOfLineCode()
+{
+    if (!CodeGeneratorShared::generateOutOfLineCode())
+        return false;
+
+    if (deoptLabel_.used()) {
+        // All non-table-based bailouts will go here.
+        masm.bind(&deoptLabel_);
+
+        // Push the frame size, so the handler can recover the IonScript.
+        // Frame size is stored in 'ra' and pushed by GenerateBailoutThunk
+        // We have to use 'ra' because generateBailoutTable will implicitly do
+        // the same.
+        masm.move32(Imm32(frameSize()), ra);
+
+        JitCode *handler = gen->jitRuntime()->getGenericBailoutHandler();
+
+        masm.branch(handler);
+    }
+
+    return true;
+}
+
+// Retarget `label` (an as-yet-unbound failure label) to a bailout for
+// `snapshot`. Parallel-execution blocks simply abort; sequential ones
+// encode the snapshot and route through an OutOfLineBailout stub.
+bool
+CodeGeneratorMIPS::bailoutFrom(Label *label, LSnapshot *snapshot)
+{
+    if (masm.bailed())
+        return false;
+    MOZ_ASSERT(label->used());
+    MOZ_ASSERT(!label->bound());
+
+    CompileInfo &info = snapshot->mir()->block()->info();
+    switch (info.executionMode()) {
+      case ParallelExecution: {
+        // in parallel mode, make no attempt to recover, just signal an error.
+        OutOfLineAbortPar *ool = oolAbortPar(ParallelBailoutUnsupported,
+                                             snapshot->mir()->block(),
+                                             snapshot->mir()->pc());
+        masm.retarget(label, ool->entry());
+        return true;
+      }
+      case SequentialExecution:
+        break;
+      default:
+        MOZ_ASSUME_UNREACHABLE("No such execution mode");
+    }
+
+    if (!encode(snapshot))
+        return false;
+
+    // Though the assembler doesn't track all frame pushes, at least make sure
+    // the known value makes sense. We can't use bailout tables if the stack
+    // isn't properly aligned to the static frame size.
+    MOZ_ASSERT_IF(frameClass_ != FrameSizeClass::None(),
+                  frameClass_.frameSize() == masm.framePushed());
+
+    // We don't use table bailouts because retargeting is easier this way.
+    OutOfLineBailout *ool = new(alloc()) OutOfLineBailout(snapshot, masm.framePushed());
+    if (!addOutOfLineCode(ool)) {
+        return false;
+    }
+
+    masm.retarget(label, ool->entry());
+
+    return true;
+}
+
+// Unconditional bailout: emit a jump to a fresh label and funnel it through
+// bailoutFrom().
+bool
+CodeGeneratorMIPS::bailout(LSnapshot *snapshot)
+{
+    Label label;
+    masm.jump(&label);
+    return bailoutFrom(&label, snapshot);
+}
+
+// OOL bailout stub body: store the snapshot offset on the stack (reserving
+// two words to keep the stack aligned) and jump to the shared deopt tail.
+bool
+CodeGeneratorMIPS::visitOutOfLineBailout(OutOfLineBailout *ool)
+{
+    // Push snapshotOffset and make sure stack is aligned.
+    masm.subPtr(Imm32(2 * sizeof(void *)), StackPointer);
+    masm.storePtr(ImmWord(ool->snapshot()->snapshotOffset()), Address(StackPointer, 0));
+
+    masm.jump(&deoptLabel_);
+    return true;
+}
+
+// Double min/max with full JS semantics: NaN-propagation, and correct
+// handling of -0 vs +0 (max(-0,0)==0, min(-0,0)==-0). Result lands in
+// `first`, which the register allocator pins to the output.
+bool
+CodeGeneratorMIPS::visitMinMaxD(LMinMaxD *ins)
+{
+    FloatRegister first = ToFloatRegister(ins->first());
+    FloatRegister second = ToFloatRegister(ins->second());
+    FloatRegister output = ToFloatRegister(ins->output());
+
+    MOZ_ASSERT(first == output);
+
+    Assembler::DoubleCondition cond = ins->mir()->isMax()
+                                      ? Assembler::DoubleLessThanOrEqual
+                                      : Assembler::DoubleGreaterThanOrEqual;
+    Label nan, equal, returnSecond, done;
+
+    // First or second is NaN, result is NaN.
+    masm.ma_bc1d(first, second, &nan, Assembler::DoubleUnordered, ShortJump);
+    // Make sure we handle -0 and 0 right.
+    masm.ma_bc1d(first, second, &equal, Assembler::DoubleEqual, ShortJump);
+    masm.ma_bc1d(first, second, &returnSecond, cond, ShortJump);
+    masm.ma_b(&done, ShortJump);
+
+    // Check for zero.
+    masm.bind(&equal);
+    masm.loadConstantDouble(0.0, ScratchFloatReg);
+    // First wasn't 0 or -0, so just return it.
+    masm.ma_bc1d(first, ScratchFloatReg, &done, Assembler::DoubleNotEqualOrUnordered, ShortJump);
+
+    // So now both operands are either -0 or 0.
+    if (ins->mir()->isMax()) {
+        // -0 + -0 = -0 and -0 + 0 = 0.
+        masm.addDouble(second, first);
+    } else {
+        // min: compute -(-first - second), which is -0 unless both are +0.
+        masm.negateDouble(first);
+        masm.subDouble(second, first);
+        masm.negateDouble(first);
+    }
+    masm.ma_b(&done, ShortJump);
+
+    masm.bind(&nan);
+    masm.loadConstantDouble(GenericNaN(), output);
+    masm.ma_b(&done, ShortJump);
+
+    masm.bind(&returnSecond);
+    masm.moveDouble(second, output);
+
+    masm.bind(&done);
+    return true;
+}
+
+// Double absolute value, computed in place (input pinned to output).
+bool
+CodeGeneratorMIPS::visitAbsD(LAbsD *ins)
+{
+    FloatRegister input = ToFloatRegister(ins->input());
+    MOZ_ASSERT(input == ToFloatRegister(ins->output()));
+    masm.as_absd(input, input);
+    return true;
+}
+
+// Single-precision absolute value, computed in place (input pinned to output).
+bool
+CodeGeneratorMIPS::visitAbsF(LAbsF *ins)
+{
+    FloatRegister input = ToFloatRegister(ins->input());
+    MOZ_ASSERT(input == ToFloatRegister(ins->output()));
+    masm.as_abss(input, input);
+    return true;
+}
+
+// Double-precision square root.
+bool
+CodeGeneratorMIPS::visitSqrtD(LSqrtD *ins)
+{
+    FloatRegister input = ToFloatRegister(ins->input());
+    FloatRegister output = ToFloatRegister(ins->output());
+    masm.as_sqrtd(output, input);
+    return true;
+}
+
+// Single-precision square root.
+bool
+CodeGeneratorMIPS::visitSqrtF(LSqrtF *ins)
+{
+    FloatRegister input = ToFloatRegister(ins->input());
+    FloatRegister output = ToFloatRegister(ins->output());
+    masm.as_sqrts(output, input);
+    return true;
+}
+
+// Int32 addition. With a snapshot attached, use the overflow-testing add
+// and bail out on overflow; without one, a plain add suffices.
+bool
+CodeGeneratorMIPS::visitAddI(LAddI *ins)
+{
+    const LAllocation *lhs = ins->getOperand(0);
+    const LAllocation *rhs = ins->getOperand(1);
+    const LDefinition *dest = ins->getDef(0);
+
+    MOZ_ASSERT(rhs->isConstant() || rhs->isGeneralReg());
+
+    // If there is no snapshot, we don't need to check for overflow
+    if (!ins->snapshot()) {
+        if (rhs->isConstant())
+            masm.ma_addu(ToRegister(dest), ToRegister(lhs), Imm32(ToInt32(rhs)));
+        else
+            masm.as_addu(ToRegister(dest), ToRegister(lhs), ToRegister(rhs));
+        return true;
+    }
+
+    Label overflow;
+    if (rhs->isConstant())
+        masm.ma_addTestOverflow(ToRegister(dest), ToRegister(lhs), Imm32(ToInt32(rhs)), &overflow);
+    else
+        masm.ma_addTestOverflow(ToRegister(dest), ToRegister(lhs), ToRegister(rhs), &overflow);
+
+    if (!bailoutFrom(&overflow, ins->snapshot()))
+        return false;
+
+    return true;
+}
+
+// Int32 subtraction; same overflow/bailout structure as visitAddI.
+bool
+CodeGeneratorMIPS::visitSubI(LSubI *ins)
+{
+    const LAllocation *lhs = ins->getOperand(0);
+    const LAllocation *rhs = ins->getOperand(1);
+    const LDefinition *dest = ins->getDef(0);
+
+    MOZ_ASSERT(rhs->isConstant() || rhs->isGeneralReg());
+
+    // If there is no snapshot, we don't need to check for overflow
+    if (!ins->snapshot()) {
+        if (rhs->isConstant())
+            masm.ma_subu(ToRegister(dest), ToRegister(lhs), Imm32(ToInt32(rhs)));
+        else
+            masm.as_subu(ToRegister(dest), ToRegister(lhs), ToRegister(rhs));
+        return true;
+    }
+
+    Label overflow;
+    if (rhs->isConstant())
+        masm.ma_subTestOverflow(ToRegister(dest), ToRegister(lhs), Imm32(ToInt32(rhs)), &overflow);
+    else
+        masm.ma_subTestOverflow(ToRegister(dest), ToRegister(lhs), ToRegister(rhs), &overflow);
+
+    if (!bailoutFrom(&overflow, ins->snapshot()))
+        return false;
+
+    return true;
+}
+
+// Int32 multiplication. Constant right-hand sides get strength reduction
+// (-1/0/1/2 special cases, power-of-two and sum-of-two-powers shifts);
+// otherwise a hardware multiply is used. Emits bailouts for overflow and
+// for a -0 result when the MIR node says those are observable.
+bool
+CodeGeneratorMIPS::visitMulI(LMulI *ins)
+{
+    const LAllocation *lhs = ins->lhs();
+    const LAllocation *rhs = ins->rhs();
+    Register dest = ToRegister(ins->output());
+    MMul *mul = ins->mir();
+
+    MOZ_ASSERT_IF(mul->mode() == MMul::Integer, !mul->canBeNegativeZero() && !mul->canOverflow());
+
+    if (rhs->isConstant()) {
+        int32_t constant = ToInt32(rhs);
+        Register src = ToRegister(lhs);
+
+        // Bailout on -0.0
+        // constant == 0: result is -0 when src < 0; constant < 0: when src == 0.
+        if (mul->canBeNegativeZero() && constant <= 0) {
+            Assembler::Condition cond = (constant == 0) ? Assembler::LessThan : Assembler::Equal;
+            if (!bailoutCmp32(cond, src, Imm32(0), ins->snapshot()))
+                return false;
+        }
+
+        switch (constant) {
+          case -1:
+            // -INT32_MIN is not representable; bail before negating.
+            if (mul->canOverflow()) {
+                if (!bailoutCmp32(Assembler::Equal, src, Imm32(INT32_MIN), ins->snapshot()))
+                    return false;
+            }
+            masm.ma_negu(dest, src);
+            break;
+          case 0:
+            masm.move32(Imm32(0), dest);
+            break;
+          case 1:
+            masm.move32(src, dest);
+            break;
+          case 2:
+            // x * 2 == x + x, with overflow test when required.
+            if (mul->canOverflow()) {
+                Label mulTwoOverflow;
+                masm.ma_addTestOverflow(dest, src, src, &mulTwoOverflow);
+
+                if (!bailoutFrom(&mulTwoOverflow, ins->snapshot()))
+                    return false;
+            } else {
+                masm.as_addu(dest, src, src);
+            }
+            break;
+          default:
+            uint32_t shift = FloorLog2(constant);
+
+            if (!mul->canOverflow() && (constant > 0)) {
+                // If it cannot overflow, we can do lots of optimizations.
+                uint32_t rest = constant - (1 << shift);
+
+                // See if the constant has one bit set, meaning it can be
+                // encoded as a bitshift.
+                if ((1 << shift) == constant) {
+                    masm.ma_sll(dest, src, Imm32(shift));
+                    return true;
+                }
+
+                // If the constant cannot be encoded as (1<<C1), see if it can
+                // be encoded as (1<<C1) | (1<<C2), which can be computed
+                // using an add and a shift.
+                uint32_t shift_rest = FloorLog2(rest);
+                if (src != dest && (1u << shift_rest) == rest) {
+                    masm.ma_sll(dest, src, Imm32(shift - shift_rest));
+                    masm.add32(src, dest);
+                    if (shift_rest != 0)
+                        masm.ma_sll(dest, dest, Imm32(shift_rest));
+                    return true;
+                }
+            }
+
+            if (mul->canOverflow() && (constant > 0) && (src != dest)) {
+                // To stay on the safe side, only optimize things that are a
+                // power of 2.
+
+                if ((1 << shift) == constant) {
+                    // dest = lhs * pow(2, shift)
+                    masm.ma_sll(dest, src, Imm32(shift));
+                    // At runtime, check (lhs == dest >> shift), if this does
+                    // not hold, some bits were lost due to overflow, and the
+                    // computation should be resumed as a double.
+                    masm.ma_sra(ScratchRegister, dest, Imm32(shift));
+                    if (!bailoutCmp32(Assembler::NotEqual, src, ScratchRegister, ins->snapshot()))
+                        return false;
+                    return true;
+                }
+            }
+
+            // General constant multiply, with or without overflow test.
+            if (mul->canOverflow()) {
+                Label mulConstOverflow;
+                masm.ma_mul_branch_overflow(dest, ToRegister(lhs), Imm32(ToInt32(rhs)),
+                                            &mulConstOverflow);
+
+                if (!bailoutFrom(&mulConstOverflow, ins->snapshot()))
+                    return false;
+            } else {
+                masm.ma_mult(src, Imm32(ToInt32(rhs)));
+                masm.as_mflo(dest);
+            }
+            break;
+        }
+    } else {
+        Label multRegOverflow;
+
+        if (mul->canOverflow()) {
+            masm.ma_mul_branch_overflow(dest, ToRegister(lhs), ToRegister(rhs), &multRegOverflow);
+            if (!bailoutFrom(&multRegOverflow, ins->snapshot()))
+                return false;
+        } else {
+            masm.as_mult(ToRegister(lhs), ToRegister(rhs));
+            masm.as_mflo(dest);
+        }
+
+        if (mul->canBeNegativeZero()) {
+            Label done;
+            masm.ma_b(dest, dest, &done, Assembler::NonZero, ShortJump);
+
+            // Result is -0 if lhs or rhs is negative.
+            // In that case result must be double value so bailout
+            Register scratch = SecondScratchReg;
+            masm.ma_or(scratch, ToRegister(lhs), ToRegister(rhs));
+            if (!bailoutCmp32(Assembler::Signed, scratch, scratch, ins->snapshot()))
+                return false;
+
+            masm.bind(&done);
+        }
+    }
+
+    return true;
+}
+
+// Int32 division (lhs / rhs -> dest). Guards, in order, against:
+//   - division by zero (truncated: result is 0; otherwise bail out),
+//   - INT32_MIN / -1 overflow (truncated: result is INT32_MIN; else bail),
+//   - a -0 result, i.e. 0 / negative (bail out: -0 needs a double),
+//   - a non-zero remainder when the result may not be truncated (bail out).
+// NOTE(review): the imported hunk contained mis-encoded label references
+// ("¬zero", "¬MinInt" — '&not' collapsed to '¬'); restored to
+// "&notzero" / "&notMinInt" below so the file is valid C++ again.
+bool
+CodeGeneratorMIPS::visitDivI(LDivI *ins)
+{
+    // Extract the registers from this instruction
+    Register lhs = ToRegister(ins->lhs());
+    Register rhs = ToRegister(ins->rhs());
+    Register dest = ToRegister(ins->output());
+    Register temp = ToRegister(ins->getTemp(0));
+    MDiv *mir = ins->mir();
+
+    Label done;
+
+    // Handle divide by zero.
+    if (mir->canBeDivideByZero()) {
+        if (mir->canTruncateInfinities()) {
+            // Truncated division by zero is zero (Infinity|0 == 0)
+            Label notzero;
+            masm.ma_b(rhs, rhs, &notzero, Assembler::NonZero, ShortJump);
+            masm.move32(Imm32(0), dest);
+            masm.ma_b(&done, ShortJump);
+            masm.bind(&notzero);
+        } else {
+            MOZ_ASSERT(mir->fallible());
+            if (!bailoutCmp32(Assembler::Zero, rhs, rhs, ins->snapshot()))
+                return false;
+        }
+    }
+
+    // Handle an integer overflow exception from -2147483648 / -1.
+    if (mir->canBeNegativeOverflow()) {
+        Label notMinInt;
+        masm.move32(Imm32(INT32_MIN), temp);
+        masm.ma_b(lhs, temp, &notMinInt, Assembler::NotEqual, ShortJump);
+
+        masm.move32(Imm32(-1), temp);
+        if (mir->canTruncateOverflow()) {
+            // (-INT32_MIN)|0 == INT32_MIN
+            Label skip;
+            masm.ma_b(rhs, temp, &skip, Assembler::NotEqual, ShortJump);
+            masm.move32(Imm32(INT32_MIN), dest);
+            masm.ma_b(&done, ShortJump);
+            masm.bind(&skip);
+        } else {
+            MOZ_ASSERT(mir->fallible());
+            if (!bailoutCmp32(Assembler::Equal, rhs, temp, ins->snapshot()))
+                return false;
+        }
+        masm.bind(&notMinInt);
+    }
+
+    // Handle negative 0. (0/-Y)
+    if (!mir->canTruncateNegativeZero() && mir->canBeNegativeZero()) {
+        Label nonzero;
+        masm.ma_b(lhs, lhs, &nonzero, Assembler::NonZero, ShortJump);
+        if (!bailoutCmp32(Assembler::LessThan, rhs, Imm32(0), ins->snapshot()))
+            return false;
+        masm.bind(&nonzero);
+    }
+    // Note: above safety checks could not be verified as Ion seems to be
+    // smarter and requires double arithmetic in such cases.
+
+    // All regular. Lets call div.
+    if (mir->canTruncateRemainder()) {
+        masm.as_div(lhs, rhs);
+        masm.as_mflo(dest);
+    } else {
+        MOZ_ASSERT(mir->fallible());
+
+        // Bail out if the division leaves a remainder (result is fractional).
+        Label remainderNonZero;
+        masm.ma_div_branch_overflow(dest, lhs, rhs, &remainderNonZero);
+        if (!bailoutFrom(&remainderNonZero, ins->snapshot()))
+            return false;
+    }
+
+    masm.bind(&done);
+
+    return true;
+}
+
+// Int32 division by a power of two via arithmetic shift. When the result
+// may not be truncated, bail out if any remainder bits are set. Negative
+// dividends get the standard rounding adjustment before shifting.
+bool
+CodeGeneratorMIPS::visitDivPowTwoI(LDivPowTwoI *ins)
+{
+    Register lhs = ToRegister(ins->numerator());
+    Register dest = ToRegister(ins->output());
+    Register tmp = ToRegister(ins->getTemp(0));
+    int32_t shift = ins->shift();
+
+    if (shift != 0) {
+        MDiv *mir = ins->mir();
+        if (!mir->isTruncated()) {
+            // If the remainder is going to be != 0, bailout since this must
+            // be a double.
+            masm.ma_sll(tmp, lhs, Imm32(32 - shift));
+            if (!bailoutCmp32(Assembler::NonZero, tmp, tmp, ins->snapshot()))
+                return false;
+        }
+
+        if (!mir->canBeNegativeDividend()) {
+            // Numerator is unsigned, so needs no adjusting. Do the shift.
+            masm.ma_sra(dest, lhs, Imm32(shift));
+            return true;
+        }
+
+        // Adjust the value so that shifting produces a correctly rounded result
+        // when the numerator is negative. See 10-1 "Signed Division by a Known
+        // Power of 2" in Henry S. Warren, Jr.'s Hacker's Delight.
+        if (shift > 1) {
+            masm.ma_sra(tmp, lhs, Imm32(31));
+            masm.ma_srl(tmp, tmp, Imm32(32 - shift));
+            masm.add32(lhs, tmp);
+        } else {
+            masm.ma_srl(tmp, lhs, Imm32(32 - shift));
+            masm.add32(lhs, tmp);
+        }
+
+        // Do the shift.
+        masm.ma_sra(dest, tmp, Imm32(shift));
+    } else {
+        // Division by 1: plain move.
+        masm.move32(lhs, dest);
+    }
+
+    return true;
+
+}
+
+// Int32 modulus (lhs % rhs -> dest). Guards against INT_MIN % -1, X % 0,
+// and 0 % negative — each yields 0 when the result is truncated, otherwise
+// a bailout (the true result is a double / -0.0). After the hardware div,
+// a 0 result with a negative dividend means -0, which also bails out when
+// observable. callTemp preserves the original lhs across the division.
+// NOTE(review): the imported hunk contained mis-encoded label references
+// ("¬Negative" — '&not' collapsed to '¬'); restored to "&notNegative"
+// below so the file is valid C++ again.
+bool
+CodeGeneratorMIPS::visitModI(LModI *ins)
+{
+    // Extract the registers from this instruction
+    Register lhs = ToRegister(ins->lhs());
+    Register rhs = ToRegister(ins->rhs());
+    Register dest = ToRegister(ins->output());
+    Register callTemp = ToRegister(ins->callTemp());
+    MMod *mir = ins->mir();
+    Label done, prevent;
+
+    masm.move32(lhs, callTemp);
+
+    // Prevent INT_MIN % -1;
+    // The integer division will give INT_MIN, but we want -(double)INT_MIN.
+    if (mir->canBeNegativeDividend()) {
+        masm.ma_b(lhs, Imm32(INT_MIN), &prevent, Assembler::NotEqual, ShortJump);
+        if (mir->isTruncated()) {
+            // (INT_MIN % -1)|0 == 0
+            Label skip;
+            masm.ma_b(rhs, Imm32(-1), &skip, Assembler::NotEqual, ShortJump);
+            masm.move32(Imm32(0), dest);
+            masm.ma_b(&done, ShortJump);
+            masm.bind(&skip);
+        } else {
+            MOZ_ASSERT(mir->fallible());
+            if (!bailoutCmp32(Assembler::Equal, rhs, Imm32(-1), ins->snapshot()))
+                return false;
+        }
+        masm.bind(&prevent);
+    }
+
+    // 0/X (with X < 0) is bad because both of these values *should* be
+    // doubles, and the result should be -0.0, which cannot be represented in
+    // integers. X/0 is bad because it will give garbage (or abort), when it
+    // should give either \infty, -\infty or NAN.
+
+    // Prevent 0 / X (with X < 0) and X / 0
+    // testing X / Y. Compare Y with 0.
+    // There are three cases: (Y < 0), (Y == 0) and (Y > 0)
+    // If (Y < 0), then we compare X with 0, and bail if X == 0
+    // If (Y == 0), then we simply want to bail.
+    // if (Y > 0), we don't bail.
+
+    if (mir->canBeDivideByZero()) {
+        if (mir->isTruncated()) {
+            Label skip;
+            masm.ma_b(rhs, Imm32(0), &skip, Assembler::NotEqual, ShortJump);
+            masm.move32(Imm32(0), dest);
+            masm.ma_b(&done, ShortJump);
+            masm.bind(&skip);
+        } else {
+            MOZ_ASSERT(mir->fallible());
+            if (!bailoutCmp32(Assembler::Equal, rhs, Imm32(0), ins->snapshot()))
+                return false;
+        }
+    }
+
+    if (mir->canBeNegativeDividend()) {
+        Label notNegative;
+        masm.ma_b(rhs, Imm32(0), &notNegative, Assembler::GreaterThan, ShortJump);
+        if (mir->isTruncated()) {
+            // NaN|0 == 0 and (0 % -X)|0 == 0
+            Label skip;
+            masm.ma_b(lhs, Imm32(0), &skip, Assembler::NotEqual, ShortJump);
+            masm.move32(Imm32(0), dest);
+            masm.ma_b(&done, ShortJump);
+            masm.bind(&skip);
+        } else {
+            MOZ_ASSERT(mir->fallible());
+            if (!bailoutCmp32(Assembler::Equal, lhs, Imm32(0), ins->snapshot()))
+                return false;
+        }
+        masm.bind(&notNegative);
+    }
+
+    masm.as_div(lhs, rhs);
+    masm.as_mfhi(dest);
+
+    // If X%Y == 0 and X < 0, then we *actually* wanted to return -0.0
+    if (mir->canBeNegativeDividend()) {
+        if (mir->isTruncated()) {
+            // -0.0|0 == 0
+        } else {
+            MOZ_ASSERT(mir->fallible());
+            // See if X < 0
+            masm.ma_b(dest, Imm32(0), &done, Assembler::NotEqual, ShortJump);
+            if (!bailoutCmp32(Assembler::Signed, callTemp, Imm32(0), ins->snapshot()))
+                return false;
+        }
+    }
+    masm.bind(&done);
+    return true;
+}
+
+// Int32 modulus by a power of two via bit-masking. Negative inputs are
+// negated, masked, and negated back; a -0 result bails out unless truncated.
+bool
+CodeGeneratorMIPS::visitModPowTwoI(LModPowTwoI *ins)
+{
+    Register in = ToRegister(ins->getOperand(0));
+    Register out = ToRegister(ins->getDef(0));
+    MMod *mir = ins->mir();
+    Label negative, done;
+
+    masm.move32(in, out);
+    // 0 % 2^k == 0: already done.
+    masm.ma_b(in, in, &done, Assembler::Zero, ShortJump);
+    // Switch based on sign of the lhs.
+    // Positive numbers are just a bitmask
+    masm.ma_b(in, in, &negative, Assembler::Signed, ShortJump);
+    {
+        masm.and32(Imm32((1 << ins->shift()) - 1), out);
+        masm.ma_b(&done, ShortJump);
+    }
+
+    // Negative numbers need a negate, bitmask, negate
+    {
+        masm.bind(&negative);
+        masm.neg32(out);
+        masm.and32(Imm32((1 << ins->shift()) - 1), out);
+        masm.neg32(out);
+    }
+    if (mir->canBeNegativeDividend()) {
+        if (!mir->isTruncated()) {
+            MOZ_ASSERT(mir->fallible());
+            // Result 0 from a negative dividend is really -0: bail out.
+            if (!bailoutCmp32(Assembler::Equal, out, zero, ins->snapshot()))
+                return false;
+        } else {
+            // -0|0 == 0
+        }
+    }
+    masm.bind(&done);
+    return true;
+}
+
+// Int32 modulus by a power-of-two mask, delegated to ma_mod_mask. A bail
+// label is supplied only when a -0 result is observable (non-truncated,
+// possibly-negative dividend).
+bool
+CodeGeneratorMIPS::visitModMaskI(LModMaskI *ins)
+{
+    Register src = ToRegister(ins->getOperand(0));
+    Register dest = ToRegister(ins->getDef(0));
+    Register tmp = ToRegister(ins->getTemp(0));
+    MMod *mir = ins->mir();
+
+    if (!mir->isTruncated() && mir->canBeNegativeDividend()) {
+        MOZ_ASSERT(mir->fallible());
+
+        Label bail;
+        masm.ma_mod_mask(src, dest, tmp, ins->shift(), &bail);
+        if (!bailoutFrom(&bail, ins->snapshot()))
+            return false;
+    } else {
+        masm.ma_mod_mask(src, dest, tmp, ins->shift(), nullptr);
+    }
+    return true;
+}
+// Bitwise NOT of an int32 register operand.
+bool
+CodeGeneratorMIPS::visitBitNotI(LBitNotI *ins)
+{
+    const LAllocation *input = ins->getOperand(0);
+    const LDefinition *dest = ins->getDef(0);
+    MOZ_ASSERT(!input->isConstant());
+
+    masm.ma_not(ToRegister(dest), ToRegister(input));
+    return true;
+}
+
+// Bitwise OR/XOR/AND on int32 operands; rhs may be an immediate or register.
+bool
+CodeGeneratorMIPS::visitBitOpI(LBitOpI *ins)
+{
+    const LAllocation *lhs = ins->getOperand(0);
+    const LAllocation *rhs = ins->getOperand(1);
+    const LDefinition *dest = ins->getDef(0);
+    // all of these bitops should be either imm32's, or integer registers.
+    switch (ins->bitop()) {
+      case JSOP_BITOR:
+        if (rhs->isConstant())
+            masm.ma_or(ToRegister(dest), ToRegister(lhs), Imm32(ToInt32(rhs)));
+        else
+            masm.ma_or(ToRegister(dest), ToRegister(lhs), ToRegister(rhs));
+        break;
+      case JSOP_BITXOR:
+        if (rhs->isConstant())
+            masm.ma_xor(ToRegister(dest), ToRegister(lhs), Imm32(ToInt32(rhs)));
+        else
+            masm.ma_xor(ToRegister(dest), ToRegister(lhs), ToRegister(rhs));
+        break;
+      case JSOP_BITAND:
+        if (rhs->isConstant())
+            masm.ma_and(ToRegister(dest), ToRegister(lhs), Imm32(ToInt32(rhs)));
+        else
+            masm.ma_and(ToRegister(dest), ToRegister(lhs), ToRegister(rhs));
+        break;
+      default:
+        MOZ_ASSUME_UNREACHABLE("unexpected binary opcode");
+    }
+
+    return true;
+}
+
+// Int32 shifts (<<, >>, >>>). Shift counts are masked to 0-31 per JS
+// semantics. x >>> k can produce a value above INT32_MAX, which is not an
+// int32 — bail out when that is observable (fallible >>>).
+bool
+CodeGeneratorMIPS::visitShiftI(LShiftI *ins)
+{
+    Register lhs = ToRegister(ins->lhs());
+    const LAllocation *rhs = ins->rhs();
+    Register dest = ToRegister(ins->output());
+
+    if (rhs->isConstant()) {
+        int32_t shift = ToInt32(rhs) & 0x1F;
+        switch (ins->bitop()) {
+          case JSOP_LSH:
+            if (shift)
+                masm.ma_sll(dest, lhs, Imm32(shift));
+            else
+                masm.move32(lhs, dest);
+            break;
+          case JSOP_RSH:
+            if (shift)
+                masm.ma_sra(dest, lhs, Imm32(shift));
+            else
+                masm.move32(lhs, dest);
+            break;
+          case JSOP_URSH:
+            if (shift) {
+                masm.ma_srl(dest, lhs, Imm32(shift));
+            } else {
+                // x >>> 0 can overflow.
+                masm.move32(lhs, dest);
+                if (ins->mir()->toUrsh()->fallible()) {
+                    if (!bailoutCmp32(Assembler::LessThan, dest, Imm32(0), ins->snapshot()))
+                        return false;
+                }
+            }
+            break;
+          default:
+            MOZ_ASSUME_UNREACHABLE("Unexpected shift op");
+        }
+    } else {
+        // The shift amounts should be AND'ed into the 0-31 range
+        masm.ma_and(dest, ToRegister(rhs), Imm32(0x1F));
+
+        switch (ins->bitop()) {
+          case JSOP_LSH:
+            masm.ma_sll(dest, lhs, dest);
+            break;
+          case JSOP_RSH:
+            masm.ma_sra(dest, lhs, dest);
+            break;
+          case JSOP_URSH:
+            masm.ma_srl(dest, lhs, dest);
+            if (ins->mir()->toUrsh()->fallible()) {
+                // x >>> 0 can overflow.
+                if (!bailoutCmp32(Assembler::LessThan, dest, Imm32(0), ins->snapshot()))
+                    return false;
+            }
+            break;
+          default:
+            MOZ_ASSUME_UNREACHABLE("Unexpected shift op");
+        }
+    }
+
+    return true;
+}
+
+// Unsigned right shift producing a double: shift logically, then convert
+// the uint32 result to a double so values >= 2^31 stay correct.
+bool
+CodeGeneratorMIPS::visitUrshD(LUrshD *ins)
+{
+    Register lhs = ToRegister(ins->lhs());
+    Register temp = ToRegister(ins->temp());
+
+    const LAllocation *rhs = ins->rhs();
+    FloatRegister out = ToFloatRegister(ins->output());
+
+    if (rhs->isConstant()) {
+        masm.ma_srl(temp, lhs, Imm32(ToInt32(rhs)));
+    } else {
+        masm.ma_srl(temp, lhs, ToRegister(rhs));
+    }
+
+    masm.convertUInt32ToDouble(temp, out);
+    return true;
+}
+
+// Math.pow(x, 0.5) with the two IEEE corner cases handled explicitly:
+// pow(-Infinity, 0.5) == Infinity, and pow(-0, 0.5) == 0 (adding +0.0
+// normalizes -0 before the sqrt).
+bool
+CodeGeneratorMIPS::visitPowHalfD(LPowHalfD *ins)
+{
+    FloatRegister input = ToFloatRegister(ins->input());
+    FloatRegister output = ToFloatRegister(ins->output());
+
+    Label done, skip;
+
+    // Masm.pow(-Infinity, 0.5) == Infinity.
+    masm.loadConstantDouble(NegativeInfinity<double>(), ScratchFloatReg);
+    masm.ma_bc1d(input, ScratchFloatReg, &skip, Assembler::DoubleNotEqualOrUnordered, ShortJump);
+    // Negating -Infinity yields +Infinity.
+    masm.as_negd(output, ScratchFloatReg);
+    masm.ma_b(&done, ShortJump);
+
+    masm.bind(&skip);
+    // Math.pow(-0, 0.5) == 0 == Math.pow(0, 0.5).
+    // Adding 0 converts any -0 to 0.
+    masm.loadConstantDouble(0.0, ScratchFloatReg);
+    masm.as_addd(output, input, ScratchFloatReg);
+    masm.as_sqrtd(output, output);
+
+    masm.bind(&done);
+    return true;
+}
+
+// Translate an LAllocation into a MoveOperand for the move resolver:
+// GPR, FPR, or a (word-aligned) stack-pointer-relative slot.
+MoveOperand
+CodeGeneratorMIPS::toMoveOperand(const LAllocation *a) const
+{
+    if (a->isGeneralReg())
+        return MoveOperand(ToRegister(a));
+    if (a->isFloatReg()) {
+        return MoveOperand(ToFloatRegister(a));
+    }
+    MOZ_ASSERT((ToStackOffset(a) & 3) == 0);
+    int32_t offset = ToStackOffset(a);
+
+    // The way the stack slots work, we assume that everything from
+    // depth == 0 downwards is writable. However, since our frame is included
+    // in this, ensure that the frame gets skipped.
+    if (gen->compilingAsmJS())
+        offset -= AlignmentMidPrologue;
+
+    return MoveOperand(StackPointer, offset);
+}
+
+// Out-of-line jump table for a table switch: holds the MIR node and a
+// CodeLabel marking where the table of case-entry addresses is emitted.
+class js::jit::OutOfLineTableSwitch : public OutOfLineCodeBase<CodeGeneratorMIPS>
+{
+    MTableSwitch *mir_;
+    CodeLabel jumpLabel_;
+
+    bool accept(CodeGeneratorMIPS *codegen) {
+        return codegen->visitOutOfLineTableSwitch(this);
+    }
+
+  public:
+    OutOfLineTableSwitch(MTableSwitch *mir)
+      : mir_(mir)
+    {}
+
+    MTableSwitch *mir() const {
+        return mir_;
+    }
+
+    // Label bound at the start of the emitted jump table.
+    CodeLabel *jumpLabel() {
+        return &jumpLabel_;
+    }
+};
+
+// Emit the jump table itself: one (load-address, branch) pair per case.
+// Entries use CodeLabels because case addresses are absolute and only
+// known after codegen, so they are patched at link time.
+// NOTE(review): entries are presumably a fixed 16 bytes each, to match the
+// index << 4 in emitTableSwitchDispatch — confirm ma_li's expansion size.
+bool
+CodeGeneratorMIPS::visitOutOfLineTableSwitch(OutOfLineTableSwitch *ool)
+{
+    MTableSwitch *mir = ool->mir();
+
+    masm.align(sizeof(void*));
+    masm.bind(ool->jumpLabel()->src());
+    if (!masm.addCodeLabel(*ool->jumpLabel()))
+        return false;
+
+    for (size_t i = 0; i < mir->numCases(); i++) {
+        LBlock *caseblock = mir->getCase(i)->lir();
+        Label *caseheader = caseblock->label();
+        uint32_t caseoffset = caseheader->offset();
+
+        // The entries of the jump table need to be absolute addresses and thus
+        // must be patched after codegen is finished.
+        CodeLabel cl;
+        masm.ma_li(ScratchRegister, cl.dest());
+        masm.branch(ScratchRegister);
+        cl.src()->bind(caseoffset);
+        if (!masm.addCodeLabel(cl))
+            return false;
+    }
+
+    return true;
+}
+
+// Dispatch a table switch: normalize the index by mir->low(), bounds-check
+// against the case count (out-of-range goes to the default block), then
+// index into the out-of-line jump table (16 bytes per entry, hence << 4)
+// and jump through it.
+bool
+CodeGeneratorMIPS::emitTableSwitchDispatch(MTableSwitch *mir, const Register &index,
+                                           const Register &address)
+{
+    Label *defaultcase = mir->getDefault()->lir()->label();
+
+    // Lower value with low value
+    if (mir->low() != 0)
+        masm.subPtr(Imm32(mir->low()), index);
+
+    // Jump to default case if input is out of range
+    int32_t cases = mir->numCases();
+    masm.branchPtr(Assembler::AboveOrEqual, index, ImmWord(cases), defaultcase);
+
+    // To fill in the CodeLabels for the case entries, we need to first
+    // generate the case entries (we don't yet know their offsets in the
+    // instruction stream).
+    OutOfLineTableSwitch *ool = new(alloc()) OutOfLineTableSwitch(mir);
+    if (!addOutOfLineCode(ool))
+        return false;
+
+    // Compute the position where a pointer to the right case stands.
+    masm.ma_li(address, ool->jumpLabel()->dest());
+    masm.lshiftPtr(Imm32(4), index);
+    masm.addPtr(index, address);
+
+    masm.branch(address);
+    return true;
+}
+
+// Double-precision binary arithmetic (+, -, *, /).
+bool
+CodeGeneratorMIPS::visitMathD(LMathD *math)
+{
+    const LAllocation *src1 = math->getOperand(0);
+    const LAllocation *src2 = math->getOperand(1);
+    const LDefinition *output = math->getDef(0);
+
+    switch (math->jsop()) {
+      case JSOP_ADD:
+        masm.as_addd(ToFloatRegister(output), ToFloatRegister(src1), ToFloatRegister(src2));
+        break;
+      case JSOP_SUB:
+        masm.as_subd(ToFloatRegister(output), ToFloatRegister(src1), ToFloatRegister(src2));
+        break;
+      case JSOP_MUL:
+        masm.as_muld(ToFloatRegister(output), ToFloatRegister(src1), ToFloatRegister(src2));
+        break;
+      case JSOP_DIV:
+        masm.as_divd(ToFloatRegister(output), ToFloatRegister(src1), ToFloatRegister(src2));
+        break;
+      default:
+        MOZ_ASSUME_UNREACHABLE("unexpected opcode");
+    }
+    return true;
+}
+
+// Single-precision binary arithmetic (+, -, *, /).
+bool
+CodeGeneratorMIPS::visitMathF(LMathF *math)
+{
+    const LAllocation *src1 = math->getOperand(0);
+    const LAllocation *src2 = math->getOperand(1);
+    const LDefinition *output = math->getDef(0);
+
+    switch (math->jsop()) {
+      case JSOP_ADD:
+        masm.as_adds(ToFloatRegister(output), ToFloatRegister(src1), ToFloatRegister(src2));
+        break;
+      case JSOP_SUB:
+        masm.as_subs(ToFloatRegister(output), ToFloatRegister(src1), ToFloatRegister(src2));
+        break;
+      case JSOP_MUL:
+        masm.as_muls(ToFloatRegister(output), ToFloatRegister(src1), ToFloatRegister(src2));
+        break;
+      case JSOP_DIV:
+        masm.as_divs(ToFloatRegister(output), ToFloatRegister(src1), ToFloatRegister(src2));
+        break;
+      default:
+        MOZ_ASSUME_UNREACHABLE("unexpected opcode");
+    }
+    return true;
+}
+
+// Implement Math.floor on a double producing an int32. Bails out for inputs
+// with no int32 floor: NaN, -0, and values whose floor falls outside the
+// int32 range.
+bool
+CodeGeneratorMIPS::visitFloor(LFloor *lir)
+{
+    FloatRegister input = ToFloatRegister(lir->input());
+    FloatRegister scratch = ScratchFloatReg;
+    Register output = ToRegister(lir->output());
+
+    Label skipCheck, done;
+
+    // If Nan, 0 or -0 check for bailout
+    masm.loadConstantDouble(0.0, scratch);
+    masm.ma_bc1d(input, scratch, &skipCheck, Assembler::DoubleNotEqual, ShortJump);
+
+    // If high part is not zero, it is NaN or -0, so we bail.
+    masm.moveFromDoubleHi(input, SecondScratchReg);
+    if (!bailoutCmp32(Assembler::NotEqual, SecondScratchReg, Imm32(0), lir->snapshot()))
+        return false;
+
+    // Input was zero, so return zero.
+    masm.move32(Imm32(0), output);
+    masm.ma_b(&done, ShortJump);
+
+    masm.bind(&skipCheck);
+    masm.as_floorwd(scratch, input);
+    masm.moveFromDoubleLo(scratch, output);
+
+    // A result of INT_MIN or INT_MAX may indicate an out-of-range input
+    // (floor.w.d cannot distinguish these from saturation), so bail.
+    if (!bailoutCmp32(Assembler::Equal, output, Imm32(INT_MIN), lir->snapshot()))
+        return false;
+
+    if (!bailoutCmp32(Assembler::Equal, output, Imm32(INT_MAX), lir->snapshot()))
+        return false;
+
+    masm.bind(&done);
+
+    return true;
+}
+
+// Single-precision counterpart of visitFloor: Math.floor on a float32
+// producing an int32, bailing out for NaN, -0, and out-of-range results.
+bool
+CodeGeneratorMIPS::visitFloorF(LFloorF *lir)
+{
+    FloatRegister input = ToFloatRegister(lir->input());
+    FloatRegister scratch = ScratchFloatReg;
+    Register output = ToRegister(lir->output());
+
+    Label skipCheck, done;
+
+    // If Nan, 0 or -0 check for bailout
+    masm.loadConstantFloat32(0.0, scratch);
+    masm.ma_bc1s(input, scratch, &skipCheck, Assembler::DoubleNotEqual, ShortJump);
+
+    // If binary value is not zero, it is NaN or -0, so we bail.
+    masm.moveFromDoubleLo(input, SecondScratchReg);
+    if (!bailoutCmp32(Assembler::NotEqual, SecondScratchReg, Imm32(0), lir->snapshot()))
+        return false;
+
+    // Input was zero, so return zero.
+    masm.move32(Imm32(0), output);
+    masm.ma_b(&done, ShortJump);
+
+    masm.bind(&skipCheck);
+    masm.as_floorws(scratch, input);
+    masm.moveFromDoubleLo(scratch, output);
+
+    // A result of INT_MIN or INT_MAX may indicate an out-of-range input,
+    // so bail.
+    if (!bailoutCmp32(Assembler::Equal, output, Imm32(INT_MIN), lir->snapshot()))
+        return false;
+
+    if (!bailoutCmp32(Assembler::Equal, output, Imm32(INT_MAX), lir->snapshot()))
+        return false;
+
+    masm.bind(&done);
+
+    return true;
+}
+
+// Implement Math.round on a double producing an int32. Non-negative inputs
+// take the fast path: add 0.5 and floor. Negative inputs take a slow path
+// that bails out when the result would be -0 or out of int32 range.
+bool
+CodeGeneratorMIPS::visitRound(LRound *lir)
+{
+    FloatRegister input = ToFloatRegister(lir->input());
+    FloatRegister temp = ToFloatRegister(lir->temp());
+    FloatRegister scratch = ScratchFloatReg;
+    Register output = ToRegister(lir->output());
+
+    Label bail, negative, end, skipCheck;
+
+    // Load 0.5 in the temp register.
+    masm.loadConstantDouble(0.5, temp);
+
+    // Branch to a slow path for negative inputs. Doesn't catch NaN or -0.
+    masm.loadConstantDouble(0.0, scratch);
+    masm.ma_bc1d(input, scratch, &negative, Assembler::DoubleLessThan, ShortJump);
+
+    // If Nan, 0 or -0 check for bailout
+    masm.ma_bc1d(input, scratch, &skipCheck, Assembler::DoubleNotEqual, ShortJump);
+
+    // If high part is not zero, it is NaN or -0, so we bail.
+    masm.moveFromDoubleHi(input, SecondScratchReg);
+    if (!bailoutCmp32(Assembler::NotEqual, SecondScratchReg, Imm32(0), lir->snapshot()))
+        return false;
+
+    // Input was zero, so return zero.
+    masm.move32(Imm32(0), output);
+    masm.ma_b(&end, ShortJump);
+
+    masm.bind(&skipCheck);
+    // Fast path: round(x) == floor(x + 0.5) for non-negative x.
+    masm.loadConstantDouble(0.5, scratch);
+    masm.addDouble(input, scratch);
+    masm.as_floorwd(scratch, scratch);
+
+    masm.moveFromDoubleLo(scratch, output);
+
+    // INT_MIN/INT_MAX results may indicate an out-of-range input, so bail.
+    if (!bailoutCmp32(Assembler::Equal, output, Imm32(INT_MIN), lir->snapshot()))
+        return false;
+
+    if (!bailoutCmp32(Assembler::Equal, output, Imm32(INT_MAX), lir->snapshot()))
+        return false;
+
+    masm.jump(&end);
+
+    // Input is negative, but isn't -0.
+    masm.bind(&negative);
+    masm.addDouble(input, temp);
+
+    // If input + 0.5 >= 0, input is a negative number >= -0.5 and the
+    // result is -0.
+    masm.branchDouble(Assembler::DoubleGreaterThanOrEqual, temp, scratch, &bail);
+    if (!bailoutFrom(&bail, lir->snapshot()))
+        return false;
+
+    // Truncate and round toward zero.
+    // This is off-by-one for everything but integer-valued inputs.
+    masm.as_floorwd(scratch, temp);
+    masm.moveFromDoubleLo(scratch, output);
+
+    if (!bailoutCmp32(Assembler::Equal, output, Imm32(INT_MIN), lir->snapshot()))
+        return false;
+
+    masm.bind(&end);
+    return true;
+}
+
+// Single-precision counterpart of visitRound: Math.round on a float32
+// producing an int32, with bailouts for NaN, -0, and out-of-range results.
+bool
+CodeGeneratorMIPS::visitRoundF(LRoundF *lir)
+{
+    FloatRegister input = ToFloatRegister(lir->input());
+    FloatRegister temp = ToFloatRegister(lir->temp());
+    FloatRegister scratch = ScratchFloatReg;
+    Register output = ToRegister(lir->output());
+
+    Label bail, negative, end, skipCheck;
+
+    // Load 0.5 in the temp register.
+    masm.loadConstantFloat32(0.5, temp);
+
+    // Branch to a slow path for negative inputs. Doesn't catch NaN or -0.
+    masm.loadConstantFloat32(0.0, scratch);
+    masm.ma_bc1s(input, scratch, &negative, Assembler::DoubleLessThan, ShortJump);
+
+    // If Nan, 0 or -0 check for bailout
+    masm.ma_bc1s(input, scratch, &skipCheck, Assembler::DoubleNotEqual, ShortJump);
+
+    // If binary value is not zero, it is NaN or -0, so we bail.
+    masm.moveFromFloat32(input, SecondScratchReg);
+    if (!bailoutCmp32(Assembler::NotEqual, SecondScratchReg, Imm32(0), lir->snapshot()))
+        return false;
+
+    // Input was zero, so return zero.
+    masm.move32(Imm32(0), output);
+    masm.ma_b(&end, ShortJump);
+
+    masm.bind(&skipCheck);
+    // Fast path: round(x) == floor(x + 0.5) for non-negative x.
+    masm.loadConstantFloat32(0.5, scratch);
+    masm.as_adds(scratch, input, scratch);
+    masm.as_floorws(scratch, scratch);
+
+    masm.moveFromFloat32(scratch, output);
+
+    // INT_MIN/INT_MAX results may indicate an out-of-range input, so bail.
+    if (!bailoutCmp32(Assembler::Equal, output, Imm32(INT_MIN), lir->snapshot()))
+        return false;
+
+    if (!bailoutCmp32(Assembler::Equal, output, Imm32(INT_MAX), lir->snapshot()))
+        return false;
+
+    masm.jump(&end);
+
+    // Input is negative, but isn't -0.
+    masm.bind(&negative);
+    masm.as_adds(temp, input, temp);
+
+    // If input + 0.5 >= 0, input is a negative number >= -0.5 and the
+    // result is -0.
+    masm.branchFloat(Assembler::DoubleGreaterThanOrEqual, temp, scratch, &bail);
+    if (!bailoutFrom(&bail, lir->snapshot()))
+        return false;
+
+    // Truncate and round toward zero.
+    // This is off-by-one for everything but integer-valued inputs.
+    masm.as_floorws(scratch, temp);
+    masm.moveFromFloat32(scratch, output);
+
+    if (!bailoutCmp32(Assembler::Equal, output, Imm32(INT_MIN), lir->snapshot()))
+        return false;
+
+    masm.bind(&end);
+    return true;
+}
+
+// Truncate a double to an int32; delegates to the shared
+// emitTruncateDouble helper.
+bool
+CodeGeneratorMIPS::visitTruncateDToInt32(LTruncateDToInt32 *ins)
+{
+    FloatRegister input = ToFloatRegister(ins->input());
+    Register output = ToRegister(ins->output());
+    return emitTruncateDouble(input, output);
+}
+
+// Truncate a float32 to an int32; delegates to the shared
+// emitTruncateFloat32 helper.
+bool
+CodeGeneratorMIPS::visitTruncateFToInt32(LTruncateFToInt32 *ins)
+{
+    FloatRegister input = ToFloatRegister(ins->input());
+    Register output = ToRegister(ins->output());
+    return emitTruncateFloat32(input, output);
+}
+
+// Bucketed bailout-frame sizes; FrameSizeClass i corresponds to FrameSizes[i].
+static const uint32_t FrameSizes[] = { 128, 256, 512, 1024 };
+
+// Map a frame depth to the smallest size class that can contain it, or
+// None() when it exceeds the largest bucket.
+FrameSizeClass
+FrameSizeClass::FromDepth(uint32_t frameDepth)
+{
+    for (uint32_t i = 0; i < JS_ARRAY_LENGTH(FrameSizes); i++) {
+        if (frameDepth < FrameSizes[i])
+            return FrameSizeClass(i);
+    }
+
+    return FrameSizeClass::None();
+}
+
+// One-past-the-last valid frame size class.
+FrameSizeClass
+FrameSizeClass::ClassLimit()
+{
+    return FrameSizeClass(JS_ARRAY_LENGTH(FrameSizes));
+}
+
+// Return the frame size reserved for this size class; only valid for
+// classes produced by FromDepth (not None()/ClassLimit()).
+uint32_t
+FrameSizeClass::frameSize() const
+{
+    MOZ_ASSERT(class_ != NO_FRAME_SIZE_CLASS_ID);
+    MOZ_ASSERT(class_ < JS_ARRAY_LENGTH(FrameSizes));
+
+    return FrameSizes[class_];
+}
+
+// Assemble the NUNBOX32 (type, payload) register pair for the operand at
+// position `pos` of `ins`.
+ValueOperand
+CodeGeneratorMIPS::ToValue(LInstruction *ins, size_t pos)
+{
+    return ValueOperand(ToRegister(ins->getOperand(pos + TYPE_INDEX)),
+                        ToRegister(ins->getOperand(pos + PAYLOAD_INDEX)));
+}
+
+// Assemble the NUNBOX32 (type, payload) register pair for the output
+// definition of `ins`.
+ValueOperand
+CodeGeneratorMIPS::ToOutValue(LInstruction *ins)
+{
+    return ValueOperand(ToRegister(ins->getDef(TYPE_INDEX)),
+                        ToRegister(ins->getDef(PAYLOAD_INDEX)));
+}
+
+// Assemble the NUNBOX32 (type, payload) register pair from the temps of
+// `ins` starting at position `pos`.
+ValueOperand
+CodeGeneratorMIPS::ToTempValue(LInstruction *ins, size_t pos)
+{
+    return ValueOperand(ToRegister(ins->getTemp(pos + TYPE_INDEX)),
+                        ToRegister(ins->getTemp(pos + PAYLOAD_INDEX)));
+}
+
+// Materialize a constant js::Value into its (type, payload) register pair.
+bool
+CodeGeneratorMIPS::visitValue(LValue *value)
+{
+    const ValueOperand out = ToOutValue(value);
+
+    masm.moveValue(value->value(), out);
+    return true;
+}
+
+// Box a non-floating-point payload into a Value by writing only the type
+// tag; the payload register is reused as-is (see comment below).
+bool
+CodeGeneratorMIPS::visitBox(LBox *box)
+{
+    const LDefinition *type = box->getDef(TYPE_INDEX);
+
+    MOZ_ASSERT(!box->getOperand(0)->isConstant());
+
+    // For NUNBOX32, the input operand and the output payload have the same
+    // virtual register. All that needs to be written is the type tag for
+    // the type definition.
+    masm.move32(Imm32(MIRTypeToTag(box->type())), ToRegister(type));
+    return true;
+}
+
+// Box a floating-point value: float32 inputs are first widened to double
+// (doubles are the canonical boxed FP representation), then the double is
+// split into the (type, payload) register pair.
+bool
+CodeGeneratorMIPS::visitBoxFloatingPoint(LBoxFloatingPoint *box)
+{
+    const LDefinition *payload = box->getDef(PAYLOAD_INDEX);
+    const LDefinition *type = box->getDef(TYPE_INDEX);
+    const LAllocation *in = box->getOperand(0);
+
+    FloatRegister reg = ToFloatRegister(in);
+    if (box->type() == MIRType_Float32) {
+        masm.convertFloat32ToDouble(reg, ScratchFloatReg);
+        reg = ScratchFloatReg;
+    }
+    masm.ma_mv(reg, ValueOperand(ToRegister(type), ToRegister(payload)));
+    return true;
+}
+
+// Unbox a Value: on NUNBOX32 the payload register already holds the raw
+// value, so only a type-tag check (with bailout) is needed for fallible
+// unboxes; infallible ones emit no code at all.
+bool
+CodeGeneratorMIPS::visitUnbox(LUnbox *unbox)
+{
+    // Note that for unbox, the type and payload indexes are switched on the
+    // inputs.
+    MUnbox *mir = unbox->mir();
+    Register type = ToRegister(unbox->type());
+
+    if (mir->fallible()) {
+        if (!bailoutCmp32(Assembler::NotEqual, type, Imm32(MIRTypeToTag(mir->type())),
+                          unbox->snapshot()))
+            return false;
+    }
+    return true;
+}
+
+// Materialize a double constant into its destination FPU register.
+bool
+CodeGeneratorMIPS::visitDouble(LDouble *ins)
+{
+    masm.loadConstantDouble(ins->getDouble(), ToFloatRegister(ins->getDef(0)));
+    return true;
+}
+
+// Materialize a float32 constant into its destination FPU register.
+bool
+CodeGeneratorMIPS::visitFloat32(LFloat32 *ins)
+{
+    masm.loadConstantFloat32(ins->getFloat(), ToFloatRegister(ins->getDef(0)));
+    return true;
+}
+
+// On NUNBOX32 the type tag already lives in its own register, so no
+// extraction is needed before testing it.
+Register
+CodeGeneratorMIPS::splitTagForTest(const ValueOperand &value)
+{
+    return value.typeReg();
+}
+
+// Branch on the truthiness of a double: 0.0, -0.0 and NaN are falsey.
+// When the false target is the fall-through block, only the true branch is
+// emitted; otherwise branch to false and jump to true.
+bool
+CodeGeneratorMIPS::visitTestDAndBranch(LTestDAndBranch *test)
+{
+    FloatRegister input = ToFloatRegister(test->input());
+
+    MBasicBlock *ifTrue = test->ifTrue();
+    MBasicBlock *ifFalse = test->ifFalse();
+
+    masm.loadConstantDouble(0.0, ScratchFloatReg);
+    // If 0, or NaN, the result is false.
+
+    if (isNextBlock(ifFalse->lir())) {
+        branchToBlock(Assembler::DoubleFloat, input, ScratchFloatReg, ifTrue,
+                      Assembler::DoubleNotEqual);
+    } else {
+        branchToBlock(Assembler::DoubleFloat, input, ScratchFloatReg, ifFalse,
+                      Assembler::DoubleEqualOrUnordered);
+        jumpToBlock(ifTrue);
+    }
+
+    return true;
+}
+
+// Branch on the truthiness of a float32: 0.0, -0.0 and NaN are falsey.
+// Mirrors visitTestDAndBranch, using the single-precision compare.
+bool
+CodeGeneratorMIPS::visitTestFAndBranch(LTestFAndBranch *test)
+{
+    FloatRegister input = ToFloatRegister(test->input());
+
+    MBasicBlock *ifTrue = test->ifTrue();
+    MBasicBlock *ifFalse = test->ifFalse();
+
+    masm.loadConstantFloat32(0.0, ScratchFloatReg);
+    // If 0, or NaN, the result is false.
+
+    if (isNextBlock(ifFalse->lir())) {
+        branchToBlock(Assembler::SingleFloat, input, ScratchFloatReg, ifTrue,
+                      Assembler::DoubleNotEqual);
+    } else {
+        branchToBlock(Assembler::SingleFloat, input, ScratchFloatReg, ifFalse,
+                      Assembler::DoubleEqualOrUnordered);
+        jumpToBlock(ifTrue);
+    }
+
+    return true;
+}
+
+// Compare two doubles and set the output register to the boolean result.
+bool
+CodeGeneratorMIPS::visitCompareD(LCompareD *comp)
+{
+    FloatRegister lhs = ToFloatRegister(comp->left());
+    FloatRegister rhs = ToFloatRegister(comp->right());
+    Register dest = ToRegister(comp->output());
+
+    Assembler::DoubleCondition cond = JSOpToDoubleCondition(comp->mir()->jsop());
+    masm.ma_cmp_set_double(dest, lhs, rhs, cond);
+    return true;
+}
+
+// Compare two float32 values and set the output register to the boolean
+// result.
+bool
+CodeGeneratorMIPS::visitCompareF(LCompareF *comp)
+{
+    FloatRegister lhs = ToFloatRegister(comp->left());
+    FloatRegister rhs = ToFloatRegister(comp->right());
+    Register dest = ToRegister(comp->output());
+
+    Assembler::DoubleCondition cond = JSOpToDoubleCondition(comp->mir()->jsop());
+    masm.ma_cmp_set_float32(dest, lhs, rhs, cond);
+    return true;
+}
+
+
+// Fused double compare-and-branch. When the false target is the
+// fall-through block, emit only the taken branch; otherwise invert the
+// condition, branch to false, and jump to true.
+bool
+CodeGeneratorMIPS::visitCompareDAndBranch(LCompareDAndBranch *comp)
+{
+    FloatRegister lhs = ToFloatRegister(comp->left());
+    FloatRegister rhs = ToFloatRegister(comp->right());
+
+    Assembler::DoubleCondition cond = JSOpToDoubleCondition(comp->cmpMir()->jsop());
+    MBasicBlock *ifTrue = comp->ifTrue();
+    MBasicBlock *ifFalse = comp->ifFalse();
+
+    if (isNextBlock(ifFalse->lir())) {
+        branchToBlock(Assembler::DoubleFloat, lhs, rhs, ifTrue, cond);
+    } else {
+        branchToBlock(Assembler::DoubleFloat, lhs, rhs, ifFalse,
+                      Assembler::InvertCondition(cond));
+        jumpToBlock(ifTrue);
+    }
+
+    return true;
+}
+
+// Fused float32 compare-and-branch; mirrors visitCompareDAndBranch with the
+// single-precision compare.
+bool
+CodeGeneratorMIPS::visitCompareFAndBranch(LCompareFAndBranch *comp)
+{
+    FloatRegister lhs = ToFloatRegister(comp->left());
+    FloatRegister rhs = ToFloatRegister(comp->right());
+
+    Assembler::DoubleCondition cond = JSOpToDoubleCondition(comp->cmpMir()->jsop());
+    MBasicBlock *ifTrue = comp->ifTrue();
+    MBasicBlock *ifFalse = comp->ifFalse();
+
+    if (isNextBlock(ifFalse->lir())) {
+        branchToBlock(Assembler::SingleFloat, lhs, rhs, ifTrue, cond);
+    } else {
+        branchToBlock(Assembler::SingleFloat, lhs, rhs, ifFalse,
+                      Assembler::InvertCondition(cond));
+        jumpToBlock(ifTrue);
+    }
+
+    return true;
+}
+
+// Strict-equality compare of a boxed Value against a boolean. If the lhs is
+// not a boolean, strict (in)equality is decided by the type alone, so the
+// output is simply whether the op is JSOP_STRICTNE.
+bool
+CodeGeneratorMIPS::visitCompareB(LCompareB *lir)
+{
+    MCompare *mir = lir->mir();
+
+    const ValueOperand lhs = ToValue(lir, LCompareB::Lhs);
+    const LAllocation *rhs = lir->rhs();
+    const Register output = ToRegister(lir->output());
+
+    MOZ_ASSERT(mir->jsop() == JSOP_STRICTEQ || mir->jsop() == JSOP_STRICTNE);
+    Assembler::Condition cond = JSOpToCondition(mir->compareType(), mir->jsop());
+
+    Label notBoolean, done;
+    // Fixed: the label operands here had been corrupted by an HTML-entity
+    // decode ("&not" had become U+00AC); the address-of expressions are
+    // restored below.
+    masm.branchTestBoolean(Assembler::NotEqual, lhs, &notBoolean);
+    {
+        if (rhs->isConstant())
+            masm.cmp32Set(cond, lhs.payloadReg(), Imm32(rhs->toConstant()->toBoolean()), output);
+        else
+            masm.cmp32Set(cond, lhs.payloadReg(), ToRegister(rhs), output);
+        masm.jump(&done);
+    }
+
+    masm.bind(&notBoolean);
+    {
+        masm.move32(Imm32(mir->jsop() == JSOP_STRICTNE), output);
+    }
+
+    masm.bind(&done);
+    return true;
+}
+
+// Fused strict-equality compare of a boxed Value against a boolean, with
+// branch. A non-boolean type tag decides the outcome immediately; otherwise
+// the payloads are compared.
+bool
+CodeGeneratorMIPS::visitCompareBAndBranch(LCompareBAndBranch *lir)
+{
+    MCompare *mir = lir->cmpMir();
+    const ValueOperand lhs = ToValue(lir, LCompareBAndBranch::Lhs);
+    const LAllocation *rhs = lir->rhs();
+
+    MOZ_ASSERT(mir->jsop() == JSOP_STRICTEQ || mir->jsop() == JSOP_STRICTNE);
+
+    // A non-boolean lhs means STRICTEQ is false / STRICTNE is true.
+    MBasicBlock *mirNotBoolean = (mir->jsop() == JSOP_STRICTEQ) ? lir->ifFalse() : lir->ifTrue();
+    branchToBlock(lhs.typeReg(), ImmType(JSVAL_TYPE_BOOLEAN), mirNotBoolean, Assembler::NotEqual);
+
+    Assembler::Condition cond = JSOpToCondition(mir->compareType(), mir->jsop());
+    if (rhs->isConstant())
+        emitBranch(lhs.payloadReg(), Imm32(rhs->toConstant()->toBoolean()), cond, lir->ifTrue(),
+                   lir->ifFalse());
+    else
+        emitBranch(lhs.payloadReg(), ToRegister(rhs), cond, lir->ifTrue(), lir->ifFalse());
+
+    return true;
+}
+
+// Equality compare of two boxed Values. If the type tags differ, equality
+// is false (so the output is whether the condition was NotEqual); otherwise
+// the payloads are compared.
+bool
+CodeGeneratorMIPS::visitCompareV(LCompareV *lir)
+{
+    MCompare *mir = lir->mir();
+    Assembler::Condition cond = JSOpToCondition(mir->compareType(), mir->jsop());
+    const ValueOperand lhs = ToValue(lir, LCompareV::LhsInput);
+    const ValueOperand rhs = ToValue(lir, LCompareV::RhsInput);
+    const Register output = ToRegister(lir->output());
+
+    MOZ_ASSERT(IsEqualityOp(mir->jsop()));
+
+    Label notEqual, done;
+    // Fixed: the label operands here had been corrupted by an HTML-entity
+    // decode ("&not" had become U+00AC); the address-of expressions are
+    // restored below.
+    masm.ma_b(lhs.typeReg(), rhs.typeReg(), &notEqual, Assembler::NotEqual, ShortJump);
+    {
+        masm.cmp32Set(cond, lhs.payloadReg(), rhs.payloadReg(), output);
+        masm.ma_b(&done, ShortJump);
+    }
+    masm.bind(&notEqual);
+    {
+        masm.move32(Imm32(cond == Assembler::NotEqual), output);
+    }
+
+    masm.bind(&done);
+    return true;
+}
+
+// Fused equality compare of two boxed Values with branch: mismatched type
+// tags decide the outcome immediately, otherwise the payloads are compared.
+bool
+CodeGeneratorMIPS::visitCompareVAndBranch(LCompareVAndBranch *lir)
+{
+    MCompare *mir = lir->cmpMir();
+    Assembler::Condition cond = JSOpToCondition(mir->compareType(), mir->jsop());
+    const ValueOperand lhs = ToValue(lir, LCompareVAndBranch::LhsInput);
+    const ValueOperand rhs = ToValue(lir, LCompareVAndBranch::RhsInput);
+
+    MOZ_ASSERT(mir->jsop() == JSOP_EQ || mir->jsop() == JSOP_STRICTEQ ||
+               mir->jsop() == JSOP_NE || mir->jsop() == JSOP_STRICTNE);
+
+    // Differing type tags mean EQ is false / NE is true.
+    MBasicBlock *notEqual = (cond == Assembler::Equal) ? lir->ifFalse() : lir->ifTrue();
+
+    branchToBlock(lhs.typeReg(), rhs.typeReg(), notEqual, Assembler::NotEqual);
+    emitBranch(lhs.payloadReg(), rhs.payloadReg(), cond, lir->ifTrue(), lir->ifFalse());
+
+    return true;
+}
+
+// Fused bitwise-AND-and-branch: compute lhs & rhs into the scratch register
+// and branch on whether the result is non-zero.
+bool
+CodeGeneratorMIPS::visitBitAndAndBranch(LBitAndAndBranch *lir)
+{
+    if (lir->right()->isConstant())
+        masm.ma_and(ScratchRegister, ToRegister(lir->left()), Imm32(ToInt32(lir->right())));
+    else
+        masm.ma_and(ScratchRegister, ToRegister(lir->left()), ToRegister(lir->right()));
+    emitBranch(ScratchRegister, ScratchRegister, Assembler::NonZero, lir->ifTrue(),
+               lir->ifFalse());
+    return true;
+}
+
+// Convert an unsigned 32-bit integer to a double (asm.js path).
+bool
+CodeGeneratorMIPS::visitAsmJSUInt32ToDouble(LAsmJSUInt32ToDouble *lir)
+{
+    masm.convertUInt32ToDouble(ToRegister(lir->input()), ToFloatRegister(lir->output()));
+    return true;
+}
+
+// Convert an unsigned 32-bit integer to a float32 (asm.js path).
+bool
+CodeGeneratorMIPS::visitAsmJSUInt32ToFloat32(LAsmJSUInt32ToFloat32 *lir)
+{
+    masm.convertUInt32ToFloat32(ToRegister(lir->input()), ToFloatRegister(lir->output()));
+    return true;
+}
+
+// Logical NOT of an int32: output is 1 when the input equals zero, else 0.
+bool
+CodeGeneratorMIPS::visitNotI(LNotI *ins)
+{
+    masm.cmp32Set(Assembler::Equal, ToRegister(ins->input()), Imm32(0),
+                  ToRegister(ins->output()));
+    return true;
+}
+
+// Logical NOT of a double: output is 1 for falsey inputs (0.0, -0.0, NaN)
+// and 0 otherwise.
+bool
+CodeGeneratorMIPS::visitNotD(LNotD *ins)
+{
+    // Since this operation is not, we want to set a bit if
+    // the double is falsey, which means 0.0, -0.0 or NaN.
+    FloatRegister in = ToFloatRegister(ins->input());
+    Register dest = ToRegister(ins->output());
+
+    Label falsey, done;
+    masm.loadConstantDouble(0.0, ScratchFloatReg);
+    masm.ma_bc1d(in, ScratchFloatReg, &falsey, Assembler::DoubleEqualOrUnordered, ShortJump);
+
+    masm.move32(Imm32(0), dest);
+    masm.ma_b(&done, ShortJump);
+
+    masm.bind(&falsey);
+    masm.move32(Imm32(1), dest);
+
+    masm.bind(&done);
+    return true;
+}
+
+// Logical NOT of a float32: output is 1 for falsey inputs (0.0, -0.0, NaN)
+// and 0 otherwise.
+bool
+CodeGeneratorMIPS::visitNotF(LNotF *ins)
+{
+    // Since this operation is not, we want to set a bit if
+    // the float32 is falsey, which means 0.0, -0.0 or NaN.
+    FloatRegister in = ToFloatRegister(ins->input());
+    Register dest = ToRegister(ins->output());
+
+    Label falsey, done;
+    masm.loadConstantFloat32(0.0, ScratchFloatReg);
+    masm.ma_bc1s(in, ScratchFloatReg, &falsey, Assembler::DoubleEqualOrUnordered, ShortJump);
+
+    masm.move32(Imm32(0), dest);
+    masm.ma_b(&done, ShortJump);
+
+    masm.bind(&falsey);
+    masm.move32(Imm32(1), dest);
+
+    masm.bind(&done);
+    return true;
+}
+
+// Load a boxed Value from a fixed slot into a (type, payload) register pair.
+bool
+CodeGeneratorMIPS::visitLoadSlotV(LLoadSlotV *load)
+{
+    const ValueOperand out = ToOutValue(load);
+    Register base = ToRegister(load->input());
+    int32_t offset = load->mir()->slot() * sizeof(js::Value);
+
+    masm.loadValue(Address(base, offset), out);
+    return true;
+}
+
+// Load a slot whose type is statically known: doubles read the full Value
+// (unboxing int32 payloads on the fly), everything else reads only the
+// payload word.
+bool
+CodeGeneratorMIPS::visitLoadSlotT(LLoadSlotT *load)
+{
+    Register base = ToRegister(load->input());
+    int32_t offset = load->mir()->slot() * sizeof(js::Value);
+
+    if (load->mir()->type() == MIRType_Double)
+        masm.loadInt32OrDouble(Address(base, offset), ToFloatRegister(load->output()));
+    else
+        masm.load32(Address(base, offset + NUNBOX32_PAYLOAD_OFFSET), ToRegister(load->output()));
+    return true;
+}
+
+// Store a statically-typed value into a slot: emit a GC pre-barrier if
+// needed, then write either a full double or a (tag, payload) pair -- the
+// tag only when it differs from the slot's current known type.
+bool
+CodeGeneratorMIPS::visitStoreSlotT(LStoreSlotT *store)
+{
+    Register base = ToRegister(store->slots());
+    int32_t offset = store->mir()->slot() * sizeof(js::Value);
+
+    const LAllocation *value = store->value();
+    MIRType valueType = store->mir()->value()->type();
+
+    if (store->mir()->needsBarrier())
+        emitPreBarrier(Address(base, offset), store->mir()->slotType());
+
+    if (valueType == MIRType_Double) {
+        masm.storeDouble(ToFloatRegister(value), Address(base, offset));
+        return true;
+    }
+
+    // Store the type tag if needed.
+    if (valueType != store->mir()->slotType())
+        masm.storeTypeTag(ImmType(ValueTypeFromMIRType(valueType)), Address(base, offset));
+
+    // Store the payload.
+    if (value->isConstant())
+        masm.storePayload(*value->toConstant(), Address(base, offset));
+    else
+        masm.storePayload(ToRegister(value), Address(base, offset));
+
+    return true;
+}
+
+// Load a statically-typed element from an elements vector, handling the
+// four combinations of {constant, register} index and {double, other} type.
+// Holes are not expected (asserted below).
+bool
+CodeGeneratorMIPS::visitLoadElementT(LLoadElementT *load)
+{
+    Register base = ToRegister(load->elements());
+    if (load->mir()->type() == MIRType_Double) {
+        FloatRegister fpreg = ToFloatRegister(load->output());
+        if (load->index()->isConstant()) {
+            Address source(base, ToInt32(load->index()) * sizeof(Value));
+            if (load->mir()->loadDoubles())
+                masm.loadDouble(source, fpreg);
+            else
+                masm.loadInt32OrDouble(source, fpreg);
+        } else {
+            Register index = ToRegister(load->index());
+            if (load->mir()->loadDoubles())
+                masm.loadDouble(BaseIndex(base, index, TimesEight), fpreg);
+            else
+                masm.loadInt32OrDouble(base, index, fpreg);
+        }
+    } else {
+        // Non-double loads read only the payload word of the element.
+        if (load->index()->isConstant()) {
+            Address source(base, ToInt32(load->index()) * sizeof(Value));
+            masm.load32(source, ToRegister(load->output()));
+        } else {
+            BaseIndex source(base, ToRegister(load->index()), TimesEight);
+            masm.load32(source, ToRegister(load->output()));
+        }
+    }
+    MOZ_ASSERT(!load->mir()->needsHoleCheck());
+    return true;
+}
+
+// Store a statically-typed value into an elements vector. Doubles overwrite
+// the whole element; other types write the type tag (only when it differs
+// from the element's known type) and then the payload. Handles both
+// constant and register indices.
+void
+CodeGeneratorMIPS::storeElementTyped(const LAllocation *value, MIRType valueType,
+                                     MIRType elementType, const Register &elements,
+                                     const LAllocation *index)
+{
+    if (index->isConstant()) {
+        Address dest = Address(elements, ToInt32(index) * sizeof(Value));
+        if (valueType == MIRType_Double) {
+            masm.storeDouble(ToFloatRegister(value), Address(dest.base, dest.offset));
+            return;
+        }
+
+        // Store the type tag if needed.
+        if (valueType != elementType)
+            masm.storeTypeTag(ImmType(ValueTypeFromMIRType(valueType)), dest);
+
+        // Store the payload.
+        if (value->isConstant())
+            masm.storePayload(*value->toConstant(), dest);
+        else
+            masm.storePayload(ToRegister(value), dest);
+    } else {
+        Register indexReg = ToRegister(index);
+        if (valueType == MIRType_Double) {
+            masm.storeDouble(ToFloatRegister(value), BaseIndex(elements, indexReg, TimesEight));
+            return;
+        }
+
+        // Store the type tag if needed.
+        if (valueType != elementType)
+            masm.storeTypeTag(ImmType(ValueTypeFromMIRType(valueType)), elements, indexReg);
+
+        // Store the payload.
+        if (value->isConstant())
+            masm.storePayload(*value->toConstant(), elements, indexReg);
+        else
+            masm.storePayload(ToRegister(value), elements, indexReg);
+    }
+}
+
+// Guard that an object's shape matches the expected shape; bail out on
+// mismatch.
+bool
+CodeGeneratorMIPS::visitGuardShape(LGuardShape *guard)
+{
+    Register obj = ToRegister(guard->input());
+    Register tmp = ToRegister(guard->tempInt());
+
+    masm.loadPtr(Address(obj, JSObject::offsetOfShape()), tmp);
+    return bailoutCmpPtr(Assembler::NotEqual, tmp, ImmGCPtr(guard->mir()->shape()),
+                         guard->snapshot());
+}
+
+// Guard on an object's type object; depending on bailOnEquality() the guard
+// bails either when the type matches or when it does not.
+bool
+CodeGeneratorMIPS::visitGuardObjectType(LGuardObjectType *guard)
+{
+    Register obj = ToRegister(guard->input());
+    Register tmp = ToRegister(guard->tempInt());
+
+    masm.loadPtr(Address(obj, JSObject::offsetOfType()), tmp);
+    Assembler::Condition cond = guard->mir()->bailOnEquality()
+                                ? Assembler::Equal
+                                : Assembler::NotEqual;
+    return bailoutCmpPtr(cond, tmp, ImmGCPtr(guard->mir()->typeObject()), guard->snapshot());
+}
+
+// Guard that an object's class pointer matches the expected Class; bail out
+// on mismatch.
+bool
+CodeGeneratorMIPS::visitGuardClass(LGuardClass *guard)
+{
+    Register obj = ToRegister(guard->input());
+    Register tmp = ToRegister(guard->tempInt());
+
+    masm.loadObjClass(obj, tmp);
+    if (!bailoutCmpPtr(Assembler::NotEqual, tmp, Imm32((uint32_t)guard->mir()->getClass()),
+                       guard->snapshot()))
+        return false;
+    return true;
+}
+
+// Compute the implicit |this| for a call: when the callee's environment is
+// the current global, |this| is |undefined|; any other environment bails
+// out (no out-of-line stub path yet).
+bool
+CodeGeneratorMIPS::visitImplicitThis(LImplicitThis *lir)
+{
+    Register callee = ToRegister(lir->callee());
+    const ValueOperand out = ToOutValue(lir);
+
+    // The implicit |this| is always |undefined| if the function's environment
+    // is the current global.
+    masm.loadPtr(Address(callee, JSFunction::offsetOfEnvironment()), out.typeReg());
+    GlobalObject *global = &gen->info().script()->global();
+
+    // TODO: OOL stub path.
+    if (!bailoutCmpPtr(Assembler::NotEqual, out.typeReg(), ImmGCPtr(global), lir->snapshot()))
+        return false;
+
+    masm.moveValue(UndefinedValue(), out);
+    return true;
+}
+
+// Poll the runtime's interrupt flag and call into the VM (via an
+// out-of-line path) when it is set.
+bool
+CodeGeneratorMIPS::visitInterruptCheck(LInterruptCheck *lir)
+{
+    OutOfLineCode *ool = oolCallVM(InterruptCheckInfo, lir, (ArgList()), StoreNothing());
+    if (!ool)
+        return false;
+
+    masm.branch32(Assembler::NotEqual,
+                  AbsoluteAddress(GetIonContext()->runtime->addressOfInterrupt()), Imm32(0),
+                  ool->entry());
+    masm.bind(ool->rejoin());
+    return true;
+}
+
+// Emit the invalidation epilogue: nop padding so OsiPoint patching cannot
+// clobber it, then a sequence that pushes the bailout return address and a
+// patchable IonScript pointer before jumping to the invalidation thunk.
+bool
+CodeGeneratorMIPS::generateInvalidateEpilogue()
+{
+    // Ensure that there is enough space in the buffer for the OsiPoint
+    // patching to occur. Otherwise, we could overwrite the invalidation
+    // epilogue.
+    for (size_t i = 0; i < sizeof(void *); i += Assembler::nopSize())
+        masm.nop();
+
+    masm.bind(&invalidate_);
+
+    // Push the return address of the point that we bailed out at to the stack
+    masm.Push(ra);
+
+    // Push the Ion script onto the stack (when we determine what that
+    // pointer is).
+    invalidateEpilogueData_ = masm.pushWithPatch(ImmWord(uintptr_t(-1)));
+    JitCode *thunk = gen->jitRuntime()->getInvalidationThunk();
+
+    masm.branch(thunk);
+
+    // We should never reach this point in JIT code -- the invalidation thunk
+    // should pop the invalidated JS frame and return directly to its caller.
+    masm.assumeUnreachable("Should have returned directly to its caller instead of here.");
+    return true;
+}
+
+// Pick the scratch register used by dispatch-style IC stubs on MIPS.
+void
+DispatchIonCache::initializeAddCacheState(LInstruction *ins, AddCacheState *addState)
+{
+    // Can always use the scratch register on MIPS.
+    addState->dispatchScratch = ScratchRegister;
+}
+
+// Not yet implemented on MIPS.
+bool
+CodeGeneratorMIPS::visitLoadTypedArrayElementStatic(LLoadTypedArrayElementStatic *ins)
+{
+    MOZ_ASSUME_UNREACHABLE("NYI");
+}
+
+// Not yet implemented on MIPS.
+bool
+CodeGeneratorMIPS::visitStoreTypedArrayElementStatic(LStoreTypedArrayElementStatic *ins)
+{
+    MOZ_ASSUME_UNREACHABLE("NYI");
+}
+
+// Load from the asm.js heap. Three cases: constant pointer (bounds check
+// statically elided), dynamic pointer with the check elided, and dynamic
+// pointer with an inline bounds check whose limit is patched later (the
+// ma_BoundsCheck offset is recorded via noteHeapAccess). Out-of-range loads
+// yield NaN for floats and 0 for integers, per asm.js semantics.
+bool
+CodeGeneratorMIPS::visitAsmJSLoadHeap(LAsmJSLoadHeap *ins)
+{
+    const MAsmJSLoadHeap *mir = ins->mir();
+    const LAllocation *ptr = ins->ptr();
+    const LDefinition *out = ins->output();
+
+    // Decode the element width and signedness from the view type.
+    bool isSigned;
+    int size;
+    bool isFloat = false;
+    switch (mir->viewType()) {
+      case ArrayBufferView::TYPE_INT8:    isSigned = true;  size =  8; break;
+      case ArrayBufferView::TYPE_UINT8:   isSigned = false; size =  8; break;
+      case ArrayBufferView::TYPE_INT16:   isSigned = true;  size = 16; break;
+      case ArrayBufferView::TYPE_UINT16:  isSigned = false; size = 16; break;
+      case ArrayBufferView::TYPE_INT32:   isSigned = true;  size = 32; break;
+      case ArrayBufferView::TYPE_UINT32:  isSigned = false; size = 32; break;
+      case ArrayBufferView::TYPE_FLOAT64: isFloat  = true;  size = 64; break;
+      case ArrayBufferView::TYPE_FLOAT32: isFloat  = true;  size = 32; break;
+      default: MOZ_ASSUME_UNREACHABLE("unexpected array type");
+    }
+
+    if (ptr->isConstant()) {
+        MOZ_ASSERT(mir->skipBoundsCheck());
+        int32_t ptrImm = ptr->toConstant()->toInt32();
+        MOZ_ASSERT(ptrImm >= 0);
+        if (isFloat) {
+            if (size == 32) {
+                masm.loadFloat32(Address(HeapReg, ptrImm), ToFloatRegister(out));
+            } else {
+                masm.loadDouble(Address(HeapReg, ptrImm), ToFloatRegister(out));
+            }
+        }  else {
+            masm.ma_load(ToRegister(out), Address(HeapReg, ptrImm),
+                         static_cast<LoadStoreSize>(size), isSigned ? SignExtend : ZeroExtend);
+        }
+        return true;
+    }
+
+    Register ptrReg = ToRegister(ptr);
+
+    if (mir->skipBoundsCheck()) {
+        if (isFloat) {
+            if (size == 32) {
+                masm.loadFloat32(BaseIndex(HeapReg, ptrReg, TimesOne), ToFloatRegister(out));
+            } else {
+                masm.loadDouble(BaseIndex(HeapReg, ptrReg, TimesOne), ToFloatRegister(out));
+            }
+        } else {
+            masm.ma_load(ToRegister(out), BaseIndex(HeapReg, ptrReg, TimesOne),
+                         static_cast<LoadStoreSize>(size), isSigned ? SignExtend : ZeroExtend);
+        }
+        return true;
+    }
+
+    // Patchable heap-length load; the actual bound is filled in at link time.
+    BufferOffset bo = masm.ma_BoundsCheck(ScratchRegister);
+
+    Label outOfRange;
+    Label done;
+    masm.ma_b(ptrReg, ScratchRegister, &outOfRange, Assembler::AboveOrEqual, ShortJump);
+    // Offset is ok, let's load value.
+    if (isFloat) {
+        if (size == 32)
+            masm.loadFloat32(BaseIndex(HeapReg, ptrReg, TimesOne), ToFloatRegister(out));
+        else
+            masm.loadDouble(BaseIndex(HeapReg, ptrReg, TimesOne), ToFloatRegister(out));
+    } else {
+        masm.ma_load(ToRegister(out), BaseIndex(HeapReg, ptrReg, TimesOne),
+                     static_cast<LoadStoreSize>(size), isSigned ? SignExtend : ZeroExtend);
+    }
+    masm.ma_b(&done, ShortJump);
+    masm.bind(&outOfRange);
+    // Offset is out of range. Load default values.
+    if (isFloat) {
+        if (size == 32)
+            masm.convertDoubleToFloat32(NANReg, ToFloatRegister(out));
+        else
+            masm.moveDouble(NANReg, ToFloatRegister(out));
+    } else {
+        masm.move32(Imm32(0), ToRegister(out));
+    }
+    masm.bind(&done);
+
+    return gen->noteHeapAccess(AsmJSHeapAccess(bo.getOffset()));
+}
+
+// Store to the asm.js heap. Mirrors visitAsmJSLoadHeap: constant pointer,
+// check-elided dynamic pointer, or dynamic pointer with a patchable inline
+// bounds check. Out-of-range stores are silently skipped, per asm.js
+// semantics.
+bool
+CodeGeneratorMIPS::visitAsmJSStoreHeap(LAsmJSStoreHeap *ins)
+{
+    const MAsmJSStoreHeap *mir = ins->mir();
+    const LAllocation *value = ins->value();
+    const LAllocation *ptr = ins->ptr();
+
+    // Decode the element width and signedness from the view type.
+    bool isSigned;
+    int size;
+    bool isFloat = false;
+    switch (mir->viewType()) {
+      case ArrayBufferView::TYPE_INT8:    isSigned = true;  size =  8; break;
+      case ArrayBufferView::TYPE_UINT8:   isSigned = false; size =  8; break;
+      case ArrayBufferView::TYPE_INT16:   isSigned = true;  size = 16; break;
+      case ArrayBufferView::TYPE_UINT16:  isSigned = false; size = 16; break;
+      case ArrayBufferView::TYPE_INT32:   isSigned = true;  size = 32; break;
+      case ArrayBufferView::TYPE_UINT32:  isSigned = false; size = 32; break;
+      case ArrayBufferView::TYPE_FLOAT64: isFloat  = true;  size = 64; break;
+      case ArrayBufferView::TYPE_FLOAT32: isFloat  = true;  size = 32; break;
+      default: MOZ_ASSUME_UNREACHABLE("unexpected array type");
+    }
+
+    if (ptr->isConstant()) {
+        MOZ_ASSERT(mir->skipBoundsCheck());
+        int32_t ptrImm = ptr->toConstant()->toInt32();
+        MOZ_ASSERT(ptrImm >= 0);
+
+        if (isFloat) {
+            if (size == 32) {
+                masm.storeFloat32(ToFloatRegister(value), Address(HeapReg, ptrImm));
+            } else {
+                masm.storeDouble(ToFloatRegister(value), Address(HeapReg, ptrImm));
+            }
+        }  else {
+            masm.ma_store(ToRegister(value), Address(HeapReg, ptrImm),
+                          static_cast<LoadStoreSize>(size), isSigned ? SignExtend : ZeroExtend);
+        }
+        return true;
+    }
+
+    Register ptrReg = ToRegister(ptr);
+    Address dstAddr(ptrReg, 0);
+
+    if (mir->skipBoundsCheck()) {
+        if (isFloat) {
+            if (size == 32) {
+                masm.storeFloat32(ToFloatRegister(value), BaseIndex(HeapReg, ptrReg, TimesOne));
+            } else
+                masm.storeDouble(ToFloatRegister(value), BaseIndex(HeapReg, ptrReg, TimesOne));
+        } else {
+            masm.ma_store(ToRegister(value), BaseIndex(HeapReg, ptrReg, TimesOne),
+                          static_cast<LoadStoreSize>(size), isSigned ? SignExtend : ZeroExtend);
+        }
+        return true;
+    }
+
+    // Patchable heap-length load; the actual bound is filled in at link time.
+    BufferOffset bo = masm.ma_BoundsCheck(ScratchRegister);
+
+    Label rejoin;
+    masm.ma_b(ptrReg, ScratchRegister, &rejoin, Assembler::AboveOrEqual, ShortJump);
+
+    // Offset is ok, let's store value.
+    if (isFloat) {
+        if (size == 32) {
+            masm.storeFloat32(ToFloatRegister(value), BaseIndex(HeapReg, ptrReg, TimesOne));
+        } else
+            masm.storeDouble(ToFloatRegister(value), BaseIndex(HeapReg, ptrReg, TimesOne));
+    } else {
+        masm.ma_store(ToRegister(value), BaseIndex(HeapReg, ptrReg, TimesOne),
+                      static_cast<LoadStoreSize>(size), isSigned ? SignExtend : ZeroExtend);
+    }
+    masm.bind(&rejoin);
+
+    return gen->noteHeapAccess(AsmJSHeapAccess(bo.getOffset()));
+}
+
+// Spill an asm.js call argument to its outgoing stack slot; handles
+// constants, general-purpose registers, and FPU registers.
+bool
+CodeGeneratorMIPS::visitAsmJSPassStackArg(LAsmJSPassStackArg *ins)
+{
+    const MAsmJSPassStackArg *mir = ins->mir();
+    if (ins->arg()->isConstant()) {
+        masm.storePtr(ImmWord(ToInt32(ins->arg())), Address(StackPointer, mir->spOffset()));
+    } else {
+        if (ins->arg()->isGeneralReg()) {
+            masm.storePtr(ToRegister(ins->arg()), Address(StackPointer, mir->spOffset()));
+        } else {
+            masm.storeDouble(ToFloatRegister(ins->arg()), Address(StackPointer, mir->spOffset()));
+        }
+    }
+
+    return true;
+}
+
+bool
+CodeGeneratorMIPS::visitUDiv(LUDiv *ins)
+{
+ Register lhs = ToRegister(ins->lhs());
+ Register rhs = ToRegister(ins->rhs());
+ Register output = ToRegister(ins->output());
+
+ Label done;
+ if (ins->mir()->canBeDivideByZero()) {
+ if (ins->mir()->isTruncated()) {
+ Label notzero;
+            masm.ma_b(rhs, rhs, &notzero, Assembler::NonZero, ShortJump);
+ masm.move32(Imm32(0), output);
+ masm.ma_b(&done, ShortJump);
+            masm.bind(&notzero);
+ } else {
+ MOZ_ASSERT(ins->mir()->fallible());
+ if (!bailoutCmp32(Assembler::Equal, rhs, Imm32(0), ins->snapshot()))
+ return false;
+ }
+ }
+
+ masm.as_divu(lhs, rhs);
+ masm.as_mflo(output);
+
+ if (!ins->mir()->isTruncated()) {
+ if (!bailoutCmp32(Assembler::LessThan, output, Imm32(0), ins->snapshot()))
+ return false;
+ }
+
+ masm.bind(&done);
+ return true;
+}
+
+bool
+CodeGeneratorMIPS::visitUMod(LUMod *ins)
+{
+ Register lhs = ToRegister(ins->lhs());
+ Register rhs = ToRegister(ins->rhs());
+ Register output = ToRegister(ins->output());
+ Label done;
+
+ if (ins->mir()->canBeDivideByZero()) {
+ if (ins->mir()->isTruncated()) {
+ // Infinity|0 == 0
+ Label notzero;
+            masm.ma_b(rhs, rhs, &notzero, Assembler::NonZero, ShortJump);
+ masm.move32(Imm32(0), output);
+ masm.ma_b(&done, ShortJump);
+            masm.bind(&notzero);
+ } else {
+ MOZ_ASSERT(ins->mir()->fallible());
+ if (!bailoutCmp32(Assembler::Equal, rhs, Imm32(0), ins->snapshot()))
+ return false;
+ }
+ }
+
+ masm.as_divu(lhs, rhs);
+ masm.as_mfhi(output);
+
+ if (!ins->mir()->isTruncated()) {
+ if (!bailoutCmp32(Assembler::LessThan, output, Imm32(0), ins->snapshot()))
+ return false;
+ }
+
+ masm.bind(&done);
+ return true;
+}
+
+bool
+CodeGeneratorMIPS::visitEffectiveAddress(LEffectiveAddress *ins)
+{
+ const MEffectiveAddress *mir = ins->mir();
+ Register base = ToRegister(ins->base());
+ Register index = ToRegister(ins->index());
+ Register output = ToRegister(ins->output());
+
+ BaseIndex address(base, index, mir->scale(), mir->displacement());
+ masm.computeEffectiveAddress(address, output);
+ return true;
+}
+
+bool
+CodeGeneratorMIPS::visitAsmJSLoadGlobalVar(LAsmJSLoadGlobalVar *ins)
+{
+ const MAsmJSLoadGlobalVar *mir = ins->mir();
+ unsigned addr = mir->globalDataOffset();
+ if (mir->type() == MIRType_Int32)
+ masm.load32(Address(GlobalReg, addr), ToRegister(ins->output()));
+ else if (mir->type() == MIRType_Float32)
+ masm.loadFloat32(Address(GlobalReg, addr), ToFloatRegister(ins->output()));
+ else
+ masm.loadDouble(Address(GlobalReg, addr), ToFloatRegister(ins->output()));
+ return true;
+}
+
+bool
+CodeGeneratorMIPS::visitAsmJSStoreGlobalVar(LAsmJSStoreGlobalVar *ins)
+{
+ const MAsmJSStoreGlobalVar *mir = ins->mir();
+
+ MIRType type = mir->value()->type();
+ MOZ_ASSERT(IsNumberType(type));
+ unsigned addr = mir->globalDataOffset();
+ if (mir->value()->type() == MIRType_Int32)
+ masm.store32(ToRegister(ins->value()), Address(GlobalReg, addr));
+ else if (mir->value()->type() == MIRType_Float32)
+ masm.storeFloat32(ToFloatRegister(ins->value()), Address(GlobalReg, addr));
+ else
+ masm.storeDouble(ToFloatRegister(ins->value()), Address(GlobalReg, addr));
+ return true;
+}
+
+bool
+CodeGeneratorMIPS::visitAsmJSLoadFuncPtr(LAsmJSLoadFuncPtr *ins)
+{
+ const MAsmJSLoadFuncPtr *mir = ins->mir();
+
+ Register index = ToRegister(ins->index());
+ Register tmp = ToRegister(ins->temp());
+ Register out = ToRegister(ins->output());
+ unsigned addr = mir->globalDataOffset();
+
+ BaseIndex source(GlobalReg, index, TimesFour, addr);
+ masm.load32(source, out);
+ return true;
+}
+
+bool
+CodeGeneratorMIPS::visitAsmJSLoadFFIFunc(LAsmJSLoadFFIFunc *ins)
+{
+ const MAsmJSLoadFFIFunc *mir = ins->mir();
+ masm.loadPtr(Address(GlobalReg, mir->globalDataOffset()), ToRegister(ins->output()));
+ return true;
+}
+
+bool
+CodeGeneratorMIPS::visitNegI(LNegI *ins)
+{
+ Register input = ToRegister(ins->input());
+ Register output = ToRegister(ins->output());
+
+ masm.ma_negu(output, input);
+ return true;
+}
+
+bool
+CodeGeneratorMIPS::visitNegD(LNegD *ins)
+{
+ FloatRegister input = ToFloatRegister(ins->input());
+ FloatRegister output = ToFloatRegister(ins->output());
+
+ masm.as_negd(output, input);
+ return true;
+}
+
+bool
+CodeGeneratorMIPS::visitNegF(LNegF *ins)
+{
+ FloatRegister input = ToFloatRegister(ins->input());
+ FloatRegister output = ToFloatRegister(ins->output());
+
+ masm.as_negs(output, input);
+ return true;
+}
+
+bool
+CodeGeneratorMIPS::visitForkJoinGetSlice(LForkJoinGetSlice *ins)
+{
+ MOZ_ASSUME_UNREACHABLE("NYI");
+}
+
+JitCode *
+JitRuntime::generateForkJoinGetSliceStub(JSContext *cx)
+{
+ MOZ_ASSUME_UNREACHABLE("NYI");
+}
diff --git a/src/third_party/mozjs/js/src/jit/mips/CodeGenerator-mips.h b/src/third_party/mozjs/js/src/jit/mips/CodeGenerator-mips.h
new file mode 100644
index 0000000..95933a4
--- /dev/null
+++ b/src/third_party/mozjs/js/src/jit/mips/CodeGenerator-mips.h
@@ -0,0 +1,308 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_mips_CodeGenerator_mips_h
+#define jit_mips_CodeGenerator_mips_h
+
+#include "jit/mips/Assembler-mips.h"
+#include "jit/shared/CodeGenerator-shared.h"
+
+namespace js {
+namespace jit {
+
+class OutOfLineBailout;
+class OutOfLineTableSwitch;
+
+class CodeGeneratorMIPS : public CodeGeneratorShared
+{
+ friend class MoveResolverMIPS;
+
+ CodeGeneratorMIPS *thisFromCtor() {
+ return this;
+ }
+
+ protected:
+ // Label for the common return path.
+ NonAssertingLabel returnLabel_;
+ NonAssertingLabel deoptLabel_;
+
+ inline Address ToAddress(const LAllocation &a) {
+ MOZ_ASSERT(a.isMemory());
+ int32_t offset = ToStackOffset(&a);
+
+ // The way the stack slots work, we assume that everything from
+ // depth == 0 downwards is writable however, since our frame is
+ // included in this, ensure that the frame gets skipped.
+ if (gen->compilingAsmJS())
+ offset -= AlignmentMidPrologue;
+
+ return Address(StackPointer, offset);
+ }
+
+ inline Address ToAddress(const LAllocation *a) {
+ return ToAddress(*a);
+ }
+
+ inline Operand ToOperand(const LAllocation &a) {
+ if (a.isGeneralReg())
+ return Operand(a.toGeneralReg()->reg());
+ if (a.isFloatReg())
+ return Operand(a.toFloatReg()->reg());
+
+ MOZ_ASSERT(a.isMemory());
+ int32_t offset = ToStackOffset(&a);
+
+ // The way the stack slots work, we assume that everything from
+ // depth == 0 downwards is writable however, since our frame is
+ // included in this, ensure that the frame gets skipped.
+ if (gen->compilingAsmJS())
+ offset -= AlignmentMidPrologue;
+
+ return Operand(StackPointer, offset);
+ }
+ inline Operand ToOperand(const LAllocation *a) {
+ return ToOperand(*a);
+ }
+ inline Operand ToOperand(const LDefinition *def) {
+ return ToOperand(def->output());
+ }
+
+ MoveOperand toMoveOperand(const LAllocation *a) const;
+
+ template <typename T1, typename T2>
+ bool bailoutCmp32(Assembler::Condition c, T1 lhs, T2 rhs, LSnapshot *snapshot) {
+ bool goodBailout;
+ Label skip;
+ masm.ma_b(lhs, rhs, &skip, Assembler::InvertCondition(c), ShortJump);
+ goodBailout = bailout(snapshot);
+ masm.bind(&skip);
+ return goodBailout;
+ }
+ template<typename T>
+ bool bailoutCmp32(Assembler::Condition c, Operand lhs, T rhs, LSnapshot *snapshot) {
+ if (lhs.getTag() == Operand::REG)
+ return bailoutCmp32(c, lhs.toReg(), rhs, snapshot);
+ if (lhs.getTag() == Operand::MEM)
+ return bailoutCmp32(c, lhs.toAddress(), rhs, snapshot);
+ MOZ_ASSUME_UNREACHABLE("Invalid operand tag.");
+ return false;
+ }
+ template<typename T>
+ bool bailoutTest32(Assembler::Condition c, Register lhs, T rhs, LSnapshot *snapshot) {
+ Label bail;
+ masm.branchTest32(c, lhs, rhs, &bail);
+ return bailoutFrom(&bail, snapshot);
+ }
+ template <typename T1, typename T2>
+ bool bailoutCmpPtr(Assembler::Condition c, T1 lhs, T2 rhs, LSnapshot *snapshot) {
+ return bailoutCmp32(c, lhs, rhs, snapshot);
+ }
+ bool bailoutTestPtr(Assembler::Condition c, Register lhs, Register rhs, LSnapshot *snapshot) {
+ Label bail;
+ masm.branchTestPtr(c, lhs, rhs, &bail);
+ return bailoutFrom(&bail, snapshot);
+ }
+
+ bool bailoutFrom(Label *label, LSnapshot *snapshot);
+ bool bailout(LSnapshot *snapshot);
+
+ protected:
+ bool generatePrologue();
+ bool generateEpilogue();
+ bool generateOutOfLineCode();
+
+ template <typename T>
+ void branchToBlock(Register lhs, T rhs, MBasicBlock *mir, Assembler::Condition cond)
+ {
+ Label *label = mir->lir()->label();
+ if (Label *oolEntry = labelForBackedgeWithImplicitCheck(mir)) {
+ // Note: the backedge is initially a jump to the next instruction.
+ // It will be patched to the target block's label during link().
+ RepatchLabel rejoin;
+ CodeOffsetJump backedge;
+ Label skip;
+
+ masm.ma_b(lhs, rhs, &skip, Assembler::InvertCondition(cond), ShortJump);
+ backedge = masm.jumpWithPatch(&rejoin);
+ masm.bind(&rejoin);
+ masm.bind(&skip);
+
+ if (!patchableBackedges_.append(PatchableBackedgeInfo(backedge, label, oolEntry)))
+ MOZ_CRASH();
+ } else {
+ masm.ma_b(lhs, rhs, label, cond);
+ }
+ }
+ void branchToBlock(Assembler::FloatFormat fmt, FloatRegister lhs, FloatRegister rhs,
+ MBasicBlock *mir, Assembler::DoubleCondition cond);
+
+ // Emits a branch that directs control flow to the true block if |cond| is
+ // true, and the false block if |cond| is false.
+ template <typename T>
+ void emitBranch(Register lhs, T rhs, Assembler::Condition cond,
+ MBasicBlock *mirTrue, MBasicBlock *mirFalse)
+ {
+ if (isNextBlock(mirFalse->lir())) {
+ branchToBlock(lhs, rhs, mirTrue, cond);
+ } else {
+ branchToBlock(lhs, rhs, mirFalse, Assembler::InvertCondition(cond));
+ jumpToBlock(mirTrue);
+ }
+ }
+ void testNullEmitBranch(Assembler::Condition cond, const ValueOperand &value,
+ MBasicBlock *ifTrue, MBasicBlock *ifFalse)
+ {
+ emitBranch(value.typeReg(), (Imm32)ImmType(JSVAL_TYPE_NULL), cond, ifTrue, ifFalse);
+ }
+ void testUndefinedEmitBranch(Assembler::Condition cond, const ValueOperand &value,
+ MBasicBlock *ifTrue, MBasicBlock *ifFalse)
+ {
+ emitBranch(value.typeReg(), (Imm32)ImmType(JSVAL_TYPE_UNDEFINED), cond, ifTrue, ifFalse);
+ }
+
+ bool emitTableSwitchDispatch(MTableSwitch *mir, const Register &index, const Register &base);
+
+ public:
+ // Instruction visitors.
+ virtual bool visitMinMaxD(LMinMaxD *ins);
+ virtual bool visitAbsD(LAbsD *ins);
+ virtual bool visitAbsF(LAbsF *ins);
+ virtual bool visitSqrtD(LSqrtD *ins);
+ virtual bool visitSqrtF(LSqrtF *ins);
+ virtual bool visitAddI(LAddI *ins);
+ virtual bool visitSubI(LSubI *ins);
+ virtual bool visitBitNotI(LBitNotI *ins);
+ virtual bool visitBitOpI(LBitOpI *ins);
+
+ virtual bool visitMulI(LMulI *ins);
+
+ virtual bool visitDivI(LDivI *ins);
+ virtual bool visitDivPowTwoI(LDivPowTwoI *ins);
+ virtual bool visitModI(LModI *ins);
+ virtual bool visitModPowTwoI(LModPowTwoI *ins);
+ virtual bool visitModMaskI(LModMaskI *ins);
+ virtual bool visitPowHalfD(LPowHalfD *ins);
+ virtual bool visitShiftI(LShiftI *ins);
+ virtual bool visitUrshD(LUrshD *ins);
+
+ virtual bool visitTestIAndBranch(LTestIAndBranch *test);
+ virtual bool visitCompare(LCompare *comp);
+ virtual bool visitCompareAndBranch(LCompareAndBranch *comp);
+ virtual bool visitTestDAndBranch(LTestDAndBranch *test);
+ virtual bool visitTestFAndBranch(LTestFAndBranch *test);
+ virtual bool visitCompareD(LCompareD *comp);
+ virtual bool visitCompareF(LCompareF *comp);
+ virtual bool visitCompareDAndBranch(LCompareDAndBranch *comp);
+ virtual bool visitCompareFAndBranch(LCompareFAndBranch *comp);
+ virtual bool visitCompareB(LCompareB *lir);
+ virtual bool visitCompareBAndBranch(LCompareBAndBranch *lir);
+ virtual bool visitCompareV(LCompareV *lir);
+ virtual bool visitCompareVAndBranch(LCompareVAndBranch *lir);
+ virtual bool visitBitAndAndBranch(LBitAndAndBranch *lir);
+ virtual bool visitAsmJSUInt32ToDouble(LAsmJSUInt32ToDouble *lir);
+ virtual bool visitAsmJSUInt32ToFloat32(LAsmJSUInt32ToFloat32 *lir);
+ virtual bool visitNotI(LNotI *ins);
+ virtual bool visitNotD(LNotD *ins);
+ virtual bool visitNotF(LNotF *ins);
+
+ virtual bool visitMathD(LMathD *math);
+ virtual bool visitMathF(LMathF *math);
+ virtual bool visitFloor(LFloor *lir);
+ virtual bool visitFloorF(LFloorF *lir);
+ virtual bool visitRound(LRound *lir);
+ virtual bool visitRoundF(LRoundF *lir);
+ virtual bool visitTruncateDToInt32(LTruncateDToInt32 *ins);
+ virtual bool visitTruncateFToInt32(LTruncateFToInt32 *ins);
+
+ // Out of line visitors.
+ bool visitOutOfLineBailout(OutOfLineBailout *ool);
+ bool visitOutOfLineTableSwitch(OutOfLineTableSwitch *ool);
+
+ protected:
+ ValueOperand ToValue(LInstruction *ins, size_t pos);
+ ValueOperand ToOutValue(LInstruction *ins);
+ ValueOperand ToTempValue(LInstruction *ins, size_t pos);
+
+ // Functions for LTestVAndBranch.
+ Register splitTagForTest(const ValueOperand &value);
+
+ void storeElementTyped(const LAllocation *value, MIRType valueType, MIRType elementType,
+ const Register &elements, const LAllocation *index);
+
+ public:
+ CodeGeneratorMIPS(MIRGenerator *gen, LIRGraph *graph, MacroAssembler *masm);
+
+ public:
+ bool visitBox(LBox *box);
+ bool visitBoxFloatingPoint(LBoxFloatingPoint *box);
+ bool visitUnbox(LUnbox *unbox);
+ bool visitValue(LValue *value);
+ bool visitDouble(LDouble *ins);
+ bool visitFloat32(LFloat32 *ins);
+
+ bool visitLoadSlotV(LLoadSlotV *load);
+ bool visitLoadSlotT(LLoadSlotT *load);
+ bool visitStoreSlotT(LStoreSlotT *load);
+
+ bool visitLoadElementT(LLoadElementT *load);
+
+ bool visitGuardShape(LGuardShape *guard);
+ bool visitGuardObjectType(LGuardObjectType *guard);
+ bool visitGuardClass(LGuardClass *guard);
+ bool visitImplicitThis(LImplicitThis *lir);
+
+ bool visitInterruptCheck(LInterruptCheck *lir);
+
+ bool visitNegI(LNegI *lir);
+ bool visitNegD(LNegD *lir);
+ bool visitNegF(LNegF *lir);
+ bool visitLoadTypedArrayElementStatic(LLoadTypedArrayElementStatic *ins);
+ bool visitStoreTypedArrayElementStatic(LStoreTypedArrayElementStatic *ins);
+ bool visitAsmJSLoadHeap(LAsmJSLoadHeap *ins);
+ bool visitAsmJSStoreHeap(LAsmJSStoreHeap *ins);
+ bool visitAsmJSLoadGlobalVar(LAsmJSLoadGlobalVar *ins);
+ bool visitAsmJSStoreGlobalVar(LAsmJSStoreGlobalVar *ins);
+ bool visitAsmJSLoadFuncPtr(LAsmJSLoadFuncPtr *ins);
+ bool visitAsmJSLoadFFIFunc(LAsmJSLoadFFIFunc *ins);
+
+ bool visitAsmJSPassStackArg(LAsmJSPassStackArg *ins);
+
+ bool visitForkJoinGetSlice(LForkJoinGetSlice *ins);
+
+ bool generateInvalidateEpilogue();
+ protected:
+ void postAsmJSCall(LAsmJSCall *lir) {}
+
+ bool visitEffectiveAddress(LEffectiveAddress *ins);
+ bool visitUDiv(LUDiv *ins);
+ bool visitUMod(LUMod *ins);
+};
+
+typedef CodeGeneratorMIPS CodeGeneratorSpecific;
+
+// An out-of-line bailout thunk.
+class OutOfLineBailout : public OutOfLineCodeBase<CodeGeneratorMIPS>
+{
+ LSnapshot *snapshot_;
+ uint32_t frameSize_;
+
+ public:
+ OutOfLineBailout(LSnapshot *snapshot, uint32_t frameSize)
+ : snapshot_(snapshot),
+ frameSize_(frameSize)
+ { }
+
+ bool accept(CodeGeneratorMIPS *codegen);
+
+ LSnapshot *snapshot() const {
+ return snapshot_;
+ }
+};
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_mips_CodeGenerator_mips_h */
diff --git a/src/third_party/mozjs/js/src/jit/mips/LIR-mips.h b/src/third_party/mozjs/js/src/jit/mips/LIR-mips.h
new file mode 100644
index 0000000..7a7d51c
--- /dev/null
+++ b/src/third_party/mozjs/js/src/jit/mips/LIR-mips.h
@@ -0,0 +1,404 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_mips_LIR_mips_h
+#define jit_mips_LIR_mips_h
+
+namespace js {
+namespace jit {
+
+class LBox : public LInstructionHelper<2, 1, 0>
+{
+ MIRType type_;
+
+ public:
+ LIR_HEADER(Box);
+
+ LBox(const LAllocation &in_payload, MIRType type)
+ : type_(type)
+ {
+ setOperand(0, in_payload);
+ }
+
+ MIRType type() const {
+ return type_;
+ }
+ const char *extraName() const {
+ return StringFromMIRType(type_);
+ }
+};
+
+class LBoxFloatingPoint : public LInstructionHelper<2, 1, 1>
+{
+ MIRType type_;
+
+ public:
+ LIR_HEADER(BoxFloatingPoint);
+
+ LBoxFloatingPoint(const LAllocation &in, const LDefinition &temp, MIRType type)
+ : type_(type)
+ {
+ setOperand(0, in);
+ setTemp(0, temp);
+ }
+
+ MIRType type() const {
+ return type_;
+ }
+ const char *extraName() const {
+ return StringFromMIRType(type_);
+ }
+};
+
+class LUnbox : public LInstructionHelper<1, 2, 0>
+{
+ public:
+ LIR_HEADER(Unbox);
+
+ MUnbox *mir() const {
+ return mir_->toUnbox();
+ }
+ const LAllocation *payload() {
+ return getOperand(0);
+ }
+ const LAllocation *type() {
+ return getOperand(1);
+ }
+ const char *extraName() const {
+ return StringFromMIRType(mir()->type());
+ }
+};
+
+class LUnboxFloatingPoint : public LInstructionHelper<1, 2, 0>
+{
+ MIRType type_;
+
+ public:
+ LIR_HEADER(UnboxFloatingPoint);
+
+ static const size_t Input = 0;
+
+ LUnboxFloatingPoint(MIRType type)
+ : type_(type)
+ { }
+
+ MUnbox *mir() const {
+ return mir_->toUnbox();
+ }
+
+ MIRType type() const {
+ return type_;
+ }
+ const char *extraName() const {
+ return StringFromMIRType(type_);
+ }
+};
+
+// Convert a 32-bit unsigned integer to a double.
+class LAsmJSUInt32ToDouble : public LInstructionHelper<1, 1, 0>
+{
+ public:
+ LIR_HEADER(AsmJSUInt32ToDouble)
+
+ LAsmJSUInt32ToDouble(const LAllocation &input) {
+ setOperand(0, input);
+ }
+};
+
+// Convert a 32-bit unsigned integer to a float32.
+class LAsmJSUInt32ToFloat32 : public LInstructionHelper<1, 1, 0>
+{
+ public:
+ LIR_HEADER(AsmJSUInt32ToFloat32)
+
+ LAsmJSUInt32ToFloat32(const LAllocation &input) {
+ setOperand(0, input);
+ }
+};
+
+
+class LDivI : public LBinaryMath<1>
+{
+ public:
+ LIR_HEADER(DivI);
+
+ LDivI(const LAllocation &lhs, const LAllocation &rhs,
+ const LDefinition &temp) {
+ setOperand(0, lhs);
+ setOperand(1, rhs);
+ setTemp(0, temp);
+ }
+
+ MDiv *mir() const {
+ return mir_->toDiv();
+ }
+};
+
+class LDivPowTwoI : public LInstructionHelper<1, 1, 1>
+{
+ const int32_t shift_;
+
+ public:
+ LIR_HEADER(DivPowTwoI)
+
+ LDivPowTwoI(const LAllocation &lhs, int32_t shift, const LDefinition &temp)
+ : shift_(shift)
+ {
+ setOperand(0, lhs);
+ setTemp(0, temp);
+ }
+
+ const LAllocation *numerator() {
+ return getOperand(0);
+ }
+
+ int32_t shift() {
+ return shift_;
+ }
+
+ MDiv *mir() const {
+ return mir_->toDiv();
+ }
+};
+
+class LModI : public LBinaryMath<1>
+{
+ public:
+ LIR_HEADER(ModI);
+
+ LModI(const LAllocation &lhs, const LAllocation &rhs,
+ const LDefinition &callTemp)
+ {
+ setOperand(0, lhs);
+ setOperand(1, rhs);
+ setTemp(0, callTemp);
+ }
+
+ const LDefinition *callTemp() {
+ return getTemp(0);
+ }
+
+ MMod *mir() const {
+ return mir_->toMod();
+ }
+};
+
+class LModPowTwoI : public LInstructionHelper<1, 1, 0>
+{
+ const int32_t shift_;
+
+ public:
+ LIR_HEADER(ModPowTwoI);
+ int32_t shift()
+ {
+ return shift_;
+ }
+
+ LModPowTwoI(const LAllocation &lhs, int32_t shift)
+ : shift_(shift)
+ {
+ setOperand(0, lhs);
+ }
+
+ MMod *mir() const {
+ return mir_->toMod();
+ }
+};
+
+class LModMaskI : public LInstructionHelper<1, 1, 1>
+{
+ const int32_t shift_;
+
+ public:
+ LIR_HEADER(ModMaskI);
+
+ LModMaskI(const LAllocation &lhs, const LDefinition &temp1, int32_t shift)
+ : shift_(shift)
+ {
+ setOperand(0, lhs);
+ setTemp(0, temp1);
+ }
+
+ int32_t shift() const {
+ return shift_;
+ }
+
+ MMod *mir() const {
+ return mir_->toMod();
+ }
+};
+
+class LPowHalfD : public LInstructionHelper<1, 1, 0>
+{
+ public:
+ LIR_HEADER(PowHalfD);
+ LPowHalfD(const LAllocation &input) {
+ setOperand(0, input);
+ }
+
+ const LAllocation *input() {
+ return getOperand(0);
+ }
+ const LDefinition *output() {
+ return getDef(0);
+ }
+};
+
+// Takes a tableswitch with an integer to decide
+class LTableSwitch : public LInstructionHelper<0, 1, 2>
+{
+ public:
+ LIR_HEADER(TableSwitch);
+
+ LTableSwitch(const LAllocation &in, const LDefinition &inputCopy,
+ const LDefinition &jumpTablePointer, MTableSwitch *ins) {
+ setOperand(0, in);
+ setTemp(0, inputCopy);
+ setTemp(1, jumpTablePointer);
+ setMir(ins);
+ }
+
+ MTableSwitch *mir() const {
+ return mir_->toTableSwitch();
+ }
+
+ const LAllocation *index() {
+ return getOperand(0);
+ }
+ const LDefinition *tempInt() {
+ return getTemp(0);
+ }
+ // This is added to share the same CodeGenerator prefixes.
+ const LDefinition *tempPointer() {
+ return getTemp(1);
+ }
+};
+
+// Takes a tableswitch with an integer to decide
+class LTableSwitchV : public LInstructionHelper<0, BOX_PIECES, 3>
+{
+ public:
+ LIR_HEADER(TableSwitchV);
+
+ LTableSwitchV(const LDefinition &inputCopy, const LDefinition &floatCopy,
+ const LDefinition &jumpTablePointer, MTableSwitch *ins)
+ {
+ setTemp(0, inputCopy);
+ setTemp(1, floatCopy);
+ setTemp(2, jumpTablePointer);
+ setMir(ins);
+ }
+
+ MTableSwitch *mir() const {
+ return mir_->toTableSwitch();
+ }
+
+ static const size_t InputValue = 0;
+
+ const LDefinition *tempInt() {
+ return getTemp(0);
+ }
+ const LDefinition *tempFloat() {
+ return getTemp(1);
+ }
+ const LDefinition *tempPointer() {
+ return getTemp(2);
+ }
+};
+
+class LGuardShape : public LInstructionHelper<0, 1, 1>
+{
+ public:
+ LIR_HEADER(GuardShape);
+
+ LGuardShape(const LAllocation &in, const LDefinition &temp) {
+ setOperand(0, in);
+ setTemp(0, temp);
+ }
+ const MGuardShape *mir() const {
+ return mir_->toGuardShape();
+ }
+ const LDefinition *tempInt() {
+ return getTemp(0);
+ }
+};
+
+class LGuardObjectType : public LInstructionHelper<0, 1, 1>
+{
+ public:
+ LIR_HEADER(GuardObjectType);
+
+ LGuardObjectType(const LAllocation &in, const LDefinition &temp) {
+ setOperand(0, in);
+ setTemp(0, temp);
+ }
+ const MGuardObjectType *mir() const {
+ return mir_->toGuardObjectType();
+ }
+ const LDefinition *tempInt() {
+ return getTemp(0);
+ }
+};
+
+class LInterruptCheck : public LInstructionHelper<0, 0, 0>
+{
+ public:
+ LIR_HEADER(InterruptCheck);
+};
+
+class LMulI : public LBinaryMath<0>
+{
+ public:
+ LIR_HEADER(MulI);
+
+ MMul *mir() {
+ return mir_->toMul();
+ }
+};
+
+class LUDiv : public LBinaryMath<0>
+{
+ public:
+ LIR_HEADER(UDiv);
+
+ MDiv *mir() {
+ return mir_->toDiv();
+ }
+};
+
+class LUMod : public LBinaryMath<0>
+{
+ public:
+ LIR_HEADER(UMod);
+
+ MMod *mir() {
+ return mir_->toMod();
+ }
+};
+
+class LAsmJSLoadFuncPtr : public LInstructionHelper<1, 1, 1>
+{
+ public:
+ LIR_HEADER(AsmJSLoadFuncPtr);
+ LAsmJSLoadFuncPtr(const LAllocation &index, const LDefinition &temp) {
+ setOperand(0, index);
+ setTemp(0, temp);
+ }
+ const MAsmJSLoadFuncPtr *mir() const {
+ return mir_->toAsmJSLoadFuncPtr();
+ }
+ const LAllocation *index() {
+ return getOperand(0);
+ }
+ const LDefinition *temp() {
+ return getTemp(0);
+ }
+};
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_mips_LIR_mips_h */
diff --git a/src/third_party/mozjs/js/src/jit/mips/LOpcodes-mips.h b/src/third_party/mozjs/js/src/jit/mips/LOpcodes-mips.h
new file mode 100644
index 0000000..cb2aa37
--- /dev/null
+++ b/src/third_party/mozjs/js/src/jit/mips/LOpcodes-mips.h
@@ -0,0 +1,27 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_mips_LOpcodes_mips_h__
+#define jit_mips_LOpcodes_mips_h__
+
+#define LIR_CPU_OPCODE_LIST(_) \
+ _(Unbox) \
+ _(UnboxFloatingPoint) \
+ _(Box) \
+ _(BoxFloatingPoint) \
+ _(DivI) \
+ _(DivPowTwoI) \
+ _(ModI) \
+ _(ModPowTwoI) \
+ _(ModMaskI) \
+ _(PowHalfD) \
+ _(AsmJSUInt32ToDouble) \
+ _(AsmJSUInt32ToFloat32) \
+ _(UDiv) \
+ _(UMod) \
+ _(AsmJSLoadFuncPtr)
+
+#endif // jit_mips_LOpcodes_mips_h__
diff --git a/src/third_party/mozjs/js/src/jit/mips/Lowering-mips.cpp b/src/third_party/mozjs/js/src/jit/mips/Lowering-mips.cpp
new file mode 100644
index 0000000..6f84a5f
--- /dev/null
+++ b/src/third_party/mozjs/js/src/jit/mips/Lowering-mips.cpp
@@ -0,0 +1,531 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "mozilla/MathAlgorithms.h"
+
+
+#include "jit/Lowering.h"
+#include "jit/mips/Assembler-mips.h"
+#include "jit/MIR.h"
+
+#include "jit/shared/Lowering-shared-inl.h"
+
+using namespace js;
+using namespace js::jit;
+
+using mozilla::FloorLog2;
+
+bool
+LIRGeneratorMIPS::useBox(LInstruction *lir, size_t n, MDefinition *mir,
+ LUse::Policy policy, bool useAtStart)
+{
+ MOZ_ASSERT(mir->type() == MIRType_Value);
+
+ if (!ensureDefined(mir))
+ return false;
+ lir->setOperand(n, LUse(mir->virtualRegister(), policy, useAtStart));
+ lir->setOperand(n + 1, LUse(VirtualRegisterOfPayload(mir), policy, useAtStart));
+ return true;
+}
+
+bool
+LIRGeneratorMIPS::useBoxFixed(LInstruction *lir, size_t n, MDefinition *mir, Register reg1,
+ Register reg2)
+{
+ MOZ_ASSERT(mir->type() == MIRType_Value);
+ MOZ_ASSERT(reg1 != reg2);
+
+ if (!ensureDefined(mir))
+ return false;
+ lir->setOperand(n, LUse(reg1, mir->virtualRegister()));
+ lir->setOperand(n + 1, LUse(reg2, VirtualRegisterOfPayload(mir)));
+ return true;
+}
+
+LAllocation
+LIRGeneratorMIPS::useByteOpRegister(MDefinition *mir)
+{
+ return useRegister(mir);
+}
+
+LAllocation
+LIRGeneratorMIPS::useByteOpRegisterOrNonDoubleConstant(MDefinition *mir)
+{
+ return useRegisterOrNonDoubleConstant(mir);
+}
+
+bool
+LIRGeneratorMIPS::lowerConstantDouble(double d, MInstruction *mir)
+{
+ return define(new(alloc()) LDouble(d), mir);
+}
+
+bool
+LIRGeneratorMIPS::lowerConstantFloat32(float d, MInstruction *mir)
+{
+ return define(new(alloc()) LFloat32(d), mir);
+}
+
+bool
+LIRGeneratorMIPS::visitConstant(MConstant *ins)
+{
+ if (ins->type() == MIRType_Double)
+ return lowerConstantDouble(ins->value().toDouble(), ins);
+
+ if (ins->type() == MIRType_Float32)
+ return lowerConstantFloat32(ins->value().toDouble(), ins);
+
+ // Emit non-double constants at their uses.
+ if (ins->canEmitAtUses())
+ return emitAtUses(ins);
+
+ return LIRGeneratorShared::visitConstant(ins);
+}
+
+bool
+LIRGeneratorMIPS::visitBox(MBox *box)
+{
+ MDefinition *inner = box->getOperand(0);
+
+ // If the box wrapped a double, it needs a new register.
+ if (IsFloatingPointType(inner->type()))
+ return defineBox(new(alloc()) LBoxFloatingPoint(useRegisterAtStart(inner),
+ tempCopy(inner, 0), inner->type()), box);
+
+ if (box->canEmitAtUses())
+ return emitAtUses(box);
+
+ if (inner->isConstant())
+ return defineBox(new(alloc()) LValue(inner->toConstant()->value()), box);
+
+ LBox *lir = new(alloc()) LBox(use(inner), inner->type());
+
+ // Otherwise, we should not define a new register for the payload portion
+ // of the output, so bypass defineBox().
+ uint32_t vreg = getVirtualRegister();
+ if (vreg >= MAX_VIRTUAL_REGISTERS)
+ return false;
+
+ // Note that because we're using PASSTHROUGH, we do not change the type of
+ // the definition. We also do not define the first output as "TYPE",
+ // because it has no corresponding payload at (vreg + 1). Also note that
+ // although we copy the input's original type for the payload half of the
+ // definition, this is only for clarity. PASSTHROUGH definitions are
+ // ignored.
+ lir->setDef(0, LDefinition(vreg, LDefinition::GENERAL));
+ lir->setDef(1, LDefinition(inner->virtualRegister(), LDefinition::TypeFrom(inner->type()),
+ LDefinition::PASSTHROUGH));
+ box->setVirtualRegister(vreg);
+ return add(lir);
+}
+
+bool
+LIRGeneratorMIPS::visitUnbox(MUnbox *unbox)
+{
+ // An unbox on mips reads in a type tag (either in memory or a register) and
+ // a payload. Unlike most instructions consuming a box, we ask for the type
+ // second, so that the result can re-use the first input.
+ MDefinition *inner = unbox->getOperand(0);
+
+ if (!ensureDefined(inner))
+ return false;
+
+ if (IsFloatingPointType(unbox->type())) {
+ LUnboxFloatingPoint *lir = new(alloc()) LUnboxFloatingPoint(unbox->type());
+ if (unbox->fallible() && !assignSnapshot(lir, unbox->bailoutKind()))
+ return false;
+ if (!useBox(lir, LUnboxFloatingPoint::Input, inner))
+ return false;
+ return define(lir, unbox);
+ }
+
+ // Swap the order we use the box pieces so we can re-use the payload
+ // register.
+ LUnbox *lir = new(alloc()) LUnbox;
+ lir->setOperand(0, usePayloadInRegisterAtStart(inner));
+ lir->setOperand(1, useType(inner, LUse::REGISTER));
+
+ if (unbox->fallible() && !assignSnapshot(lir, unbox->bailoutKind()))
+ return false;
+
+ // Note that PASSTHROUGH here is illegal, since types and payloads form two
+ // separate intervals. If the type becomes dead before the payload, it
+ // could be used as a Value without the type being recoverable. Unbox's
+ // purpose is to eagerly kill the definition of a type tag, so keeping both
+ // alive (for the purpose of gcmaps) is unappealing. Instead, we create a
+ // new virtual register.
+ return defineReuseInput(lir, unbox, 0);
+}
+
+bool
+LIRGeneratorMIPS::visitReturn(MReturn *ret)
+{
+ MDefinition *opd = ret->getOperand(0);
+ MOZ_ASSERT(opd->type() == MIRType_Value);
+
+ LReturn *ins = new(alloc()) LReturn;
+ ins->setOperand(0, LUse(JSReturnReg_Type));
+ ins->setOperand(1, LUse(JSReturnReg_Data));
+ return fillBoxUses(ins, 0, opd) && add(ins);
+}
+
+// x = !y
+bool
+LIRGeneratorMIPS::lowerForALU(LInstructionHelper<1, 1, 0> *ins,
+ MDefinition *mir, MDefinition *input)
+{
+ ins->setOperand(0, useRegister(input));
+ return define(ins, mir,
+ LDefinition(LDefinition::TypeFrom(mir->type()), LDefinition::DEFAULT));
+}
+
+// z = x+y
+bool
+LIRGeneratorMIPS::lowerForALU(LInstructionHelper<1, 2, 0> *ins, MDefinition *mir,
+ MDefinition *lhs, MDefinition *rhs)
+{
+ ins->setOperand(0, useRegister(lhs));
+ ins->setOperand(1, useRegisterOrConstant(rhs));
+ return define(ins, mir,
+ LDefinition(LDefinition::TypeFrom(mir->type()), LDefinition::DEFAULT));
+}
+
+bool
+LIRGeneratorMIPS::lowerForFPU(LInstructionHelper<1, 1, 0> *ins, MDefinition *mir,
+ MDefinition *input)
+{
+ ins->setOperand(0, useRegister(input));
+ return define(ins, mir,
+ LDefinition(LDefinition::TypeFrom(mir->type()), LDefinition::DEFAULT));
+}
+
+bool
+LIRGeneratorMIPS::lowerForFPU(LInstructionHelper<1, 2, 0> *ins, MDefinition *mir,
+ MDefinition *lhs, MDefinition *rhs)
+{
+ ins->setOperand(0, useRegister(lhs));
+ ins->setOperand(1, useRegister(rhs));
+ return define(ins, mir,
+ LDefinition(LDefinition::TypeFrom(mir->type()), LDefinition::DEFAULT));
+}
+
+bool
+LIRGeneratorMIPS::lowerForBitAndAndBranch(LBitAndAndBranch *baab, MInstruction *mir,
+ MDefinition *lhs, MDefinition *rhs)
+{
+ baab->setOperand(0, useRegisterAtStart(lhs));
+ baab->setOperand(1, useRegisterOrConstantAtStart(rhs));
+ return add(baab, mir);
+}
+
+bool
+LIRGeneratorMIPS::defineUntypedPhi(MPhi *phi, size_t lirIndex)
+{
+ LPhi *type = current->getPhi(lirIndex + VREG_TYPE_OFFSET);
+ LPhi *payload = current->getPhi(lirIndex + VREG_DATA_OFFSET);
+
+ uint32_t typeVreg = getVirtualRegister();
+ if (typeVreg >= MAX_VIRTUAL_REGISTERS)
+ return false;
+
+ phi->setVirtualRegister(typeVreg);
+
+ uint32_t payloadVreg = getVirtualRegister();
+ if (payloadVreg >= MAX_VIRTUAL_REGISTERS)
+ return false;
+ MOZ_ASSERT(typeVreg + 1 == payloadVreg);
+
+ type->setDef(0, LDefinition(typeVreg, LDefinition::TYPE));
+ payload->setDef(0, LDefinition(payloadVreg, LDefinition::PAYLOAD));
+ annotate(type);
+ annotate(payload);
+ return true;
+}
+
+void
+LIRGeneratorMIPS::lowerUntypedPhiInput(MPhi *phi, uint32_t inputPosition,
+ LBlock *block, size_t lirIndex)
+{
+ MDefinition *operand = phi->getOperand(inputPosition);
+ LPhi *type = block->getPhi(lirIndex + VREG_TYPE_OFFSET);
+ LPhi *payload = block->getPhi(lirIndex + VREG_DATA_OFFSET);
+ type->setOperand(inputPosition, LUse(operand->virtualRegister() + VREG_TYPE_OFFSET,
+ LUse::ANY));
+ payload->setOperand(inputPosition, LUse(VirtualRegisterOfPayload(operand), LUse::ANY));
+}
+
+bool
+LIRGeneratorMIPS::lowerForShift(LInstructionHelper<1, 2, 0> *ins, MDefinition *mir,
+ MDefinition *lhs, MDefinition *rhs)
+{
+ ins->setOperand(0, useRegister(lhs));
+ ins->setOperand(1, useRegisterOrConstant(rhs));
+ return define(ins, mir);
+}
+
+bool
+LIRGeneratorMIPS::lowerDivI(MDiv *div)
+{
+ if (div->isUnsigned())
+ return lowerUDiv(div);
+
+ // Division instructions are slow. Division by constant denominators can be
+ // rewritten to use other instructions.
+ if (div->rhs()->isConstant()) {
+ int32_t rhs = div->rhs()->toConstant()->value().toInt32();
+ // Check for division by a positive power of two, which is an easy and
+ // important case to optimize. Note that other optimizations are also
+ // possible; division by negative powers of two can be optimized in a
+ // similar manner as positive powers of two, and division by other
+ // constants can be optimized by a reciprocal multiplication technique.
+ int32_t shift = FloorLog2(rhs);
+ if (rhs > 0 && 1 << shift == rhs) {
+ LDivPowTwoI *lir = new(alloc()) LDivPowTwoI(useRegister(div->lhs()), shift, temp());
+ if (div->fallible() && !assignSnapshot(lir, Bailout_BaselineInfo))
+ return false;
+ return define(lir, div);
+ }
+ }
+
+ LDivI *lir = new(alloc()) LDivI(useRegister(div->lhs()), useRegister(div->rhs()), temp());
+ if (div->fallible() && !assignSnapshot(lir, Bailout_BaselineInfo))
+ return false;
+ return define(lir, div);
+}
+
+bool
+LIRGeneratorMIPS::lowerMulI(MMul *mul, MDefinition *lhs, MDefinition *rhs)
+{
+ LMulI *lir = new(alloc()) LMulI;
+ if (mul->fallible() && !assignSnapshot(lir, Bailout_BaselineInfo))
+ return false;
+
+ return lowerForALU(lir, mul, lhs, rhs);
+}
+
+bool
+LIRGeneratorMIPS::lowerModI(MMod *mod)
+{
+ if (mod->isUnsigned())
+ return lowerUMod(mod);
+
+ if (mod->rhs()->isConstant()) {
+ int32_t rhs = mod->rhs()->toConstant()->value().toInt32();
+ int32_t shift = FloorLog2(rhs);
+ if (rhs > 0 && 1 << shift == rhs) {
+ LModPowTwoI *lir = new(alloc()) LModPowTwoI(useRegister(mod->lhs()), shift);
+ if (mod->fallible() && !assignSnapshot(lir, Bailout_BaselineInfo))
+ return false;
+ return define(lir, mod);
+ } else if (shift < 31 && (1 << (shift + 1)) - 1 == rhs) {
+ LModMaskI *lir = new(alloc()) LModMaskI(useRegister(mod->lhs()),
+ temp(LDefinition::GENERAL), shift + 1);
+ if (mod->fallible() && !assignSnapshot(lir, Bailout_BaselineInfo))
+ return false;
+ return define(lir, mod);
+ }
+ }
+ LModI *lir = new(alloc()) LModI(useRegister(mod->lhs()), useRegister(mod->rhs()),
+ temp(LDefinition::GENERAL));
+
+ if (mod->fallible() && !assignSnapshot(lir, Bailout_BaselineInfo))
+ return false;
+ return define(lir, mod);
+}
+
+bool
+LIRGeneratorMIPS::visitPowHalf(MPowHalf *ins)
+{
+ MDefinition *input = ins->input();
+ MOZ_ASSERT(input->type() == MIRType_Double);
+ LPowHalfD *lir = new(alloc()) LPowHalfD(useRegisterAtStart(input));
+ return defineReuseInput(lir, ins, 0);
+}
+
+LTableSwitch *
+LIRGeneratorMIPS::newLTableSwitch(const LAllocation &in, const LDefinition &inputCopy,
+ MTableSwitch *tableswitch)
+{
+ return new(alloc()) LTableSwitch(in, inputCopy, temp(), tableswitch);
+}
+
+LTableSwitchV *
+LIRGeneratorMIPS::newLTableSwitchV(MTableSwitch *tableswitch)
+{
+ return new(alloc()) LTableSwitchV(temp(), tempFloat32(), temp(), tableswitch);
+}
+
+bool
+LIRGeneratorMIPS::visitGuardShape(MGuardShape *ins)
+{
+ MOZ_ASSERT(ins->obj()->type() == MIRType_Object);
+
+ LDefinition tempObj = temp(LDefinition::OBJECT);
+ LGuardShape *guard = new(alloc()) LGuardShape(useRegister(ins->obj()), tempObj);
+ if (!assignSnapshot(guard, ins->bailoutKind()))
+ return false;
+ if (!add(guard, ins))
+ return false;
+ return redefine(ins, ins->obj());
+}
+
+bool
+LIRGeneratorMIPS::visitGuardObjectType(MGuardObjectType *ins)
+{
+ MOZ_ASSERT(ins->obj()->type() == MIRType_Object);
+
+ LDefinition tempObj = temp(LDefinition::OBJECT);
+ LGuardObjectType *guard = new(alloc()) LGuardObjectType(useRegister(ins->obj()), tempObj);
+ if (!assignSnapshot(guard))
+ return false;
+ if (!add(guard, ins))
+ return false;
+ return redefine(ins, ins->obj());
+}
+
+bool
+LIRGeneratorMIPS::lowerUrshD(MUrsh *mir)
+{
+ MDefinition *lhs = mir->lhs();
+ MDefinition *rhs = mir->rhs();
+
+ MOZ_ASSERT(lhs->type() == MIRType_Int32);
+ MOZ_ASSERT(rhs->type() == MIRType_Int32);
+
+ LUrshD *lir = new(alloc()) LUrshD(useRegister(lhs), useRegisterOrConstant(rhs), temp());
+ return define(lir, mir);
+}
+
+bool
+LIRGeneratorMIPS::visitAsmJSNeg(MAsmJSNeg *ins)
+{
+ if (ins->type() == MIRType_Int32)
+ return define(new(alloc()) LNegI(useRegisterAtStart(ins->input())), ins);
+
+ if (ins->type() == MIRType_Float32)
+ return define(new(alloc()) LNegF(useRegisterAtStart(ins->input())), ins);
+
+ MOZ_ASSERT(ins->type() == MIRType_Double);
+ return define(new(alloc()) LNegD(useRegisterAtStart(ins->input())), ins);
+}
+
+bool
+LIRGeneratorMIPS::lowerUDiv(MDiv *div)
+{
+ MDefinition *lhs = div->getOperand(0);
+ MDefinition *rhs = div->getOperand(1);
+
+ LUDiv *lir = new(alloc()) LUDiv;
+ lir->setOperand(0, useRegister(lhs));
+ lir->setOperand(1, useRegister(rhs));
+ if (div->fallible() && !assignSnapshot(lir, Bailout_BaselineInfo))
+ return false;
+
+ return define(lir, div);
+}
+
+bool
+LIRGeneratorMIPS::lowerUMod(MMod *mod)
+{
+ MDefinition *lhs = mod->getOperand(0);
+ MDefinition *rhs = mod->getOperand(1);
+
+ LUMod *lir = new(alloc()) LUMod;
+ lir->setOperand(0, useRegister(lhs));
+ lir->setOperand(1, useRegister(rhs));
+ if (mod->fallible() && !assignSnapshot(lir, Bailout_BaselineInfo))
+ return false;
+
+ return define(lir, mod);
+}
+
+bool
+LIRGeneratorMIPS::visitAsmJSUnsignedToDouble(MAsmJSUnsignedToDouble *ins)
+{
+ MOZ_ASSERT(ins->input()->type() == MIRType_Int32);
+ LAsmJSUInt32ToDouble *lir = new(alloc()) LAsmJSUInt32ToDouble(useRegisterAtStart(ins->input()));
+ return define(lir, ins);
+}
+
+bool
+LIRGeneratorMIPS::visitAsmJSUnsignedToFloat32(MAsmJSUnsignedToFloat32 *ins)
+{
+ MOZ_ASSERT(ins->input()->type() == MIRType_Int32);
+ LAsmJSUInt32ToFloat32 *lir = new(alloc()) LAsmJSUInt32ToFloat32(useRegisterAtStart(ins->input()));
+ return define(lir, ins);
+}
+
+bool
+LIRGeneratorMIPS::visitAsmJSLoadHeap(MAsmJSLoadHeap *ins)
+{
+ MDefinition *ptr = ins->ptr();
+ MOZ_ASSERT(ptr->type() == MIRType_Int32);
+ LAllocation ptrAlloc;
+
+ // For MIPS it is best to keep the 'ptr' in a register if a bounds check
+ // is needed.
+ if (ptr->isConstant() && ins->skipBoundsCheck()) {
+ int32_t ptrValue = ptr->toConstant()->value().toInt32();
+ // A bounds check is only skipped for a positive index.
+ MOZ_ASSERT(ptrValue >= 0);
+ ptrAlloc = LAllocation(ptr->toConstant()->vp());
+ } else
+ ptrAlloc = useRegisterAtStart(ptr);
+
+ return define(new(alloc()) LAsmJSLoadHeap(ptrAlloc), ins);
+}
+
+bool
+LIRGeneratorMIPS::visitAsmJSStoreHeap(MAsmJSStoreHeap *ins)
+{
+ MDefinition *ptr = ins->ptr();
+ MOZ_ASSERT(ptr->type() == MIRType_Int32);
+ LAllocation ptrAlloc;
+
+ if (ptr->isConstant() && ins->skipBoundsCheck()) {
+ MOZ_ASSERT(ptr->toConstant()->value().toInt32() >= 0);
+ ptrAlloc = LAllocation(ptr->toConstant()->vp());
+ } else
+ ptrAlloc = useRegisterAtStart(ptr);
+
+ return add(new(alloc()) LAsmJSStoreHeap(ptrAlloc, useRegisterAtStart(ins->value())), ins);
+}
+
+bool
+LIRGeneratorMIPS::visitAsmJSLoadFuncPtr(MAsmJSLoadFuncPtr *ins)
+{
+ return define(new(alloc()) LAsmJSLoadFuncPtr(useRegister(ins->index()), temp()), ins);
+}
+
+bool
+LIRGeneratorMIPS::lowerTruncateDToInt32(MTruncateToInt32 *ins)
+{
+ MDefinition *opd = ins->input();
+ MOZ_ASSERT(opd->type() == MIRType_Double);
+
+ return define(new(alloc()) LTruncateDToInt32(useRegister(opd), LDefinition::BogusTemp()), ins);
+}
+
+bool
+LIRGeneratorMIPS::lowerTruncateFToInt32(MTruncateToInt32 *ins)
+{
+ MDefinition *opd = ins->input();
+ MOZ_ASSERT(opd->type() == MIRType_Float32);
+
+ return define(new(alloc()) LTruncateFToInt32(useRegister(opd), LDefinition::BogusTemp()), ins);
+}
+
+bool
+LIRGeneratorMIPS::visitStoreTypedArrayElementStatic(MStoreTypedArrayElementStatic *ins)
+{
+ MOZ_ASSUME_UNREACHABLE("NYI");
+}
+
+bool
+LIRGeneratorMIPS::visitForkJoinGetSlice(MForkJoinGetSlice *ins)
+{
+ MOZ_ASSUME_UNREACHABLE("NYI");
+}
diff --git a/src/third_party/mozjs/js/src/jit/mips/Lowering-mips.h b/src/third_party/mozjs/js/src/jit/mips/Lowering-mips.h
new file mode 100644
index 0000000..0deab80
--- /dev/null
+++ b/src/third_party/mozjs/js/src/jit/mips/Lowering-mips.h
@@ -0,0 +1,103 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_mips_Lowering_mips_h
+#define jit_mips_Lowering_mips_h
+
+#include "jit/shared/Lowering-shared.h"
+
+namespace js {
+namespace jit {
+
+class LIRGeneratorMIPS : public LIRGeneratorShared
+{
+ protected:
+ LIRGeneratorMIPS(MIRGenerator *gen, MIRGraph &graph, LIRGraph &lirGraph)
+ : LIRGeneratorShared(gen, graph, lirGraph)
+ { }
+
+ protected:
+ // Adds a box input to an instruction, setting operand |n| to the type and
+ // |n+1| to the payload.
+ bool useBox(LInstruction *lir, size_t n, MDefinition *mir,
+ LUse::Policy policy = LUse::REGISTER, bool useAtStart = false);
+ bool useBoxFixed(LInstruction *lir, size_t n, MDefinition *mir, Register reg1, Register reg2);
+
+ // x86 has constraints on what registers can be formatted for 1-byte
+ // stores and loads; on MIPS all registers are okay.
+ LAllocation useByteOpRegister(MDefinition *mir);
+ LAllocation useByteOpRegisterOrNonDoubleConstant(MDefinition *mir);
+
+ inline LDefinition tempToUnbox() {
+ return LDefinition::BogusTemp();
+ }
+
+ // MIPS has a scratch register, so no need for another temp for dispatch
+ // ICs.
+ LDefinition tempForDispatchCache(MIRType outputType = MIRType_None) {
+ return LDefinition::BogusTemp();
+ }
+
+ void lowerUntypedPhiInput(MPhi *phi, uint32_t inputPosition, LBlock *block, size_t lirIndex);
+ bool defineUntypedPhi(MPhi *phi, size_t lirIndex);
+ bool lowerForShift(LInstructionHelper<1, 2, 0> *ins, MDefinition *mir, MDefinition *lhs,
+ MDefinition *rhs);
+ bool lowerUrshD(MUrsh *mir);
+
+ bool lowerForALU(LInstructionHelper<1, 1, 0> *ins, MDefinition *mir,
+ MDefinition *input);
+ bool lowerForALU(LInstructionHelper<1, 2, 0> *ins, MDefinition *mir,
+ MDefinition *lhs, MDefinition *rhs);
+
+ bool lowerForFPU(LInstructionHelper<1, 1, 0> *ins, MDefinition *mir,
+ MDefinition *src);
+ bool lowerForFPU(LInstructionHelper<1, 2, 0> *ins, MDefinition *mir,
+ MDefinition *lhs, MDefinition *rhs);
+ bool lowerForBitAndAndBranch(LBitAndAndBranch *baab, MInstruction *mir,
+ MDefinition *lhs, MDefinition *rhs);
+ bool lowerConstantDouble(double d, MInstruction *ins);
+ bool lowerConstantFloat32(float d, MInstruction *ins);
+ bool lowerTruncateDToInt32(MTruncateToInt32 *ins);
+ bool lowerTruncateFToInt32(MTruncateToInt32 *ins);
+ bool lowerDivI(MDiv *div);
+ bool lowerModI(MMod *mod);
+ bool lowerMulI(MMul *mul, MDefinition *lhs, MDefinition *rhs);
+ bool lowerUDiv(MDiv *div);
+ bool lowerUMod(MMod *mod);
+ bool visitPowHalf(MPowHalf *ins);
+ bool visitAsmJSNeg(MAsmJSNeg *ins);
+
+ LTableSwitch *newLTableSwitch(const LAllocation &in, const LDefinition &inputCopy,
+ MTableSwitch *ins);
+ LTableSwitchV *newLTableSwitchV(MTableSwitch *ins);
+
+ public:
+ bool visitConstant(MConstant *ins);
+ bool visitBox(MBox *box);
+ bool visitUnbox(MUnbox *unbox);
+ bool visitReturn(MReturn *ret);
+ bool lowerPhi(MPhi *phi);
+ bool visitGuardShape(MGuardShape *ins);
+ bool visitGuardObjectType(MGuardObjectType *ins);
+ bool visitAsmJSUnsignedToDouble(MAsmJSUnsignedToDouble *ins);
+ bool visitAsmJSUnsignedToFloat32(MAsmJSUnsignedToFloat32 *ins);
+ bool visitAsmJSLoadHeap(MAsmJSLoadHeap *ins);
+ bool visitAsmJSStoreHeap(MAsmJSStoreHeap *ins);
+ bool visitAsmJSLoadFuncPtr(MAsmJSLoadFuncPtr *ins);
+ bool visitStoreTypedArrayElementStatic(MStoreTypedArrayElementStatic *ins);
+ bool visitForkJoinGetSlice(MForkJoinGetSlice *ins);
+
+ static bool allowFloat32Optimizations() {
+ return true;
+ }
+};
+
+typedef LIRGeneratorMIPS LIRGeneratorSpecific;
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_mips_Lowering_mips_h */
diff --git a/src/third_party/mozjs/js/src/jit/mips/MacroAssembler-mips.cpp b/src/third_party/mozjs/js/src/jit/mips/MacroAssembler-mips.cpp
new file mode 100644
index 0000000..cee93a5
--- /dev/null
+++ b/src/third_party/mozjs/js/src/jit/mips/MacroAssembler-mips.cpp
@@ -0,0 +1,3273 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/mips/MacroAssembler-mips.h"
+
+#include "mozilla/DebugOnly.h"
+#include "mozilla/MathAlgorithms.h"
+
+#include "jit/Bailouts.h"
+#include "jit/BaselineFrame.h"
+#include "jit/BaselineRegisters.h"
+#include "jit/IonFrames.h"
+#include "jit/MoveEmitter.h"
+
+using namespace js;
+using namespace jit;
+
+using mozilla::Abs;
+
+static const int32_t PAYLOAD_OFFSET = NUNBOX32_PAYLOAD_OFFSET;
+static const int32_t TAG_OFFSET = NUNBOX32_TYPE_OFFSET;
+
+static_assert(sizeof(intptr_t) == 4, "Not 64-bit clean.");
+
+void
+MacroAssemblerMIPS::convertBoolToInt32(Register src, Register dest)
+{
+ // Note that C++ bool is only 1 byte, so zero extend it to clear the
+ // higher-order bits.
+ ma_and(dest, src, Imm32(0xff));
+}
+
+void
+MacroAssemblerMIPS::convertInt32ToDouble(const Register &src, const FloatRegister &dest)
+{
+ as_mtc1(src, dest);
+ as_cvtdw(dest, dest);
+}
+
+void
+MacroAssemblerMIPS::convertInt32ToDouble(const Address &src, FloatRegister dest)
+{
+ ma_lw(ScratchRegister, src);
+ as_mtc1(ScratchRegister, dest);
+ as_cvtdw(dest, dest);
+}
+
+void
+MacroAssemblerMIPS::convertUInt32ToDouble(const Register &src, const FloatRegister &dest)
+{
+ // We use SecondScratchFloatReg because MacroAssembler::loadFromTypedArray
+ // calls with ScratchFloatReg as dest.
+ MOZ_ASSERT(dest != SecondScratchFloatReg);
+
+ // Subtract INT32_MIN to get a positive number
+ ma_subu(ScratchRegister, src, Imm32(INT32_MIN));
+
+ // Convert value
+ as_mtc1(ScratchRegister, dest);
+ as_cvtdw(dest, dest);
+
+ // Add unsigned value of INT32_MIN
+ ma_lid(SecondScratchFloatReg, 2147483648.0);
+ as_addd(dest, dest, SecondScratchFloatReg);
+}
+
+void
+MacroAssemblerMIPS::convertUInt32ToFloat32(const Register &src, const FloatRegister &dest)
+{
+ MOZ_ASSUME_UNREACHABLE("NYI");
+}
+
+void
+MacroAssemblerMIPS::convertDoubleToFloat32(const FloatRegister &src, const FloatRegister &dest)
+{
+ as_cvtsd(dest, src);
+}
+
+// Convert the floating point value to an integer, if it did not fit, then it
+// was clamped to INT32_MIN/INT32_MAX, and we can test it.
+// NOTE: if the value really was supposed to be INT32_MAX / INT32_MIN then it
+// will be wrong.
+void
+MacroAssemblerMIPS::branchTruncateDouble(const FloatRegister &src, const Register &dest,
+ Label *fail)
+{
+ Label test, success;
+ as_truncwd(ScratchFloatReg, src);
+ as_mfc1(dest, ScratchFloatReg);
+
+ ma_b(dest, Imm32(INT32_MAX), fail, Assembler::Equal);
+}
+
+// Checks whether a double is representable as a 32-bit integer. If so, the
+// integer is written to the output register. Otherwise, a bailout is taken to
+// the given snapshot. This function overwrites the scratch float register.
+void
+MacroAssemblerMIPS::convertDoubleToInt32(const FloatRegister &src, const Register &dest,
+ Label *fail, bool negativeZeroCheck)
+{
+ // Convert double to int, then convert back and check if we have the
+ // same number.
+ as_cvtwd(ScratchFloatReg, src);
+ as_mfc1(dest, ScratchFloatReg);
+ as_cvtdw(ScratchFloatReg, ScratchFloatReg);
+ ma_bc1d(src, ScratchFloatReg, fail, Assembler::DoubleNotEqualOrUnordered);
+
+ if (negativeZeroCheck) {
+ Label notZero;
+ ma_b(dest, Imm32(0), ¬Zero, Assembler::NotEqual, ShortJump);
+ // Test and bail for -0.0, when integer result is 0
+ // Move the top word of the double into the output reg, if it is
+ // non-zero, then the original value was -0.0
+ moveFromDoubleHi(src, dest);
+ ma_b(dest, Imm32(INT32_MIN), fail, Assembler::Equal);
+ bind(¬Zero);
+ }
+}
+
+// Checks whether a float32 is representable as a 32-bit integer. If so, the
+// integer is written to the output register. Otherwise, a bailout is taken to
+// the given snapshot. This function overwrites the scratch float register.
+void
+MacroAssemblerMIPS::convertFloat32ToInt32(const FloatRegister &src, const Register &dest,
+ Label *fail, bool negativeZeroCheck)
+{
+ // convert the floating point value to an integer, if it did not fit, then
+ // when we convert it *back* to a float, it will have a different value,
+ // which we can test.
+ as_cvtws(ScratchFloatReg, src);
+ as_mfc1(dest, ScratchFloatReg);
+ as_cvtsw(ScratchFloatReg, ScratchFloatReg);
+ ma_bc1s(src, ScratchFloatReg, fail, Assembler::DoubleNotEqualOrUnordered);
+
+ if (negativeZeroCheck) {
+ Label notZero;
+ ma_b(dest, Imm32(0), ¬Zero, Assembler::NotEqual, ShortJump);
+ // Test and bail for -0.0, when integer result is 0
+ // Move the top word of the double into the output reg,
+ // if it is non-zero, then the original value was -0.0
+ moveFromDoubleHi(src, dest);
+ ma_b(dest, Imm32(INT32_MIN), fail, Assembler::Equal);
+ bind(¬Zero);
+ }
+}
+
+void
+MacroAssemblerMIPS::convertFloat32ToDouble(const FloatRegister &src, const FloatRegister &dest)
+{
+ as_cvtds(dest, src);
+}
+
+void
+MacroAssemblerMIPS::branchTruncateFloat32(const FloatRegister &src, const Register &dest,
+ Label *fail)
+{
+ Label test, success;
+ as_truncws(ScratchFloatReg, src);
+ as_mfc1(dest, ScratchFloatReg);
+
+ ma_b(dest, Imm32(INT32_MAX), fail, Assembler::Equal);
+}
+
+void
+MacroAssemblerMIPS::convertInt32ToFloat32(const Register &src, const FloatRegister &dest)
+{
+ as_mtc1(src, dest);
+ as_cvtsw(dest, dest);
+}
+
+void
+MacroAssemblerMIPS::convertInt32ToFloat32(const Address &src, FloatRegister dest)
+{
+ ma_lw(ScratchRegister, src);
+ as_mtc1(ScratchRegister, dest);
+ as_cvtsw(dest, dest);
+}
+
+void
+MacroAssemblerMIPS::addDouble(FloatRegister src, FloatRegister dest)
+{
+ as_addd(dest, dest, src);
+}
+
+void
+MacroAssemblerMIPS::subDouble(FloatRegister src, FloatRegister dest)
+{
+ as_subd(dest, dest, src);
+}
+
+void
+MacroAssemblerMIPS::mulDouble(FloatRegister src, FloatRegister dest)
+{
+ as_muld(dest, dest, src);
+}
+
+void
+MacroAssemblerMIPS::divDouble(FloatRegister src, FloatRegister dest)
+{
+ as_divd(dest, dest, src);
+}
+
+void
+MacroAssemblerMIPS::negateDouble(FloatRegister reg)
+{
+ as_negd(reg, reg);
+}
+
+void
+MacroAssemblerMIPS::inc64(AbsoluteAddress dest)
+{
+ ma_li(ScratchRegister, Imm32((int32_t)dest.addr));
+ as_lw(SecondScratchReg, ScratchRegister, 0);
+
+ as_addiu(SecondScratchReg, SecondScratchReg, 1);
+ as_sw(SecondScratchReg, ScratchRegister, 0);
+
+ as_sltiu(SecondScratchReg, SecondScratchReg, 1);
+ as_lw(ScratchRegister, ScratchRegister, 4);
+
+ as_addu(SecondScratchReg, ScratchRegister, SecondScratchReg);
+
+ ma_li(ScratchRegister, Imm32((int32_t)dest.addr));
+ as_sw(SecondScratchReg, ScratchRegister, 4);
+}
+
+void
+MacroAssemblerMIPS::ma_move(Register rd, Register rs)
+{
+ as_or(rd, rs, zero);
+}
+
+void
+MacroAssemblerMIPS::ma_li(Register dest, const ImmGCPtr &ptr)
+{
+ writeDataRelocation(ptr);
+ ma_liPatchable(dest, Imm32(ptr.value));
+}
+
+void
+MacroAssemblerMIPS::ma_li(const Register &dest, AbsoluteLabel *label)
+{
+ MOZ_ASSERT(!label->bound());
+ // Thread the patch list through the unpatched address word in the
+ // instruction stream.
+ BufferOffset bo = m_buffer.nextOffset();
+ ma_liPatchable(dest, Imm32(label->prev()));
+ label->setPrev(bo.getOffset());
+}
+
+void
+MacroAssemblerMIPS::ma_li(Register dest, Imm32 imm)
+{
+ if (Imm16::isInSignedRange(imm.value)) {
+ as_addiu(dest, zero, imm.value);
+ } else if (Imm16::isInUnsignedRange(imm.value)) {
+ as_ori(dest, zero, Imm16::lower(imm).encode());
+ } else if (Imm16::lower(imm).encode() == 0) {
+ as_lui(dest, Imm16::upper(imm).encode());
+ } else {
+ as_lui(dest, Imm16::upper(imm).encode());
+ as_ori(dest, dest, Imm16::lower(imm).encode());
+ }
+}
+
+
+// This method generates lui and ori instruction pair that can be modified by
+// updateLuiOriValue, either during compilation (eg. Assembler::bind), or
+// during execution (eg. jit::PatchJump).
+void
+MacroAssemblerMIPS::ma_liPatchable(Register dest, Imm32 imm)
+{
+ m_buffer.ensureSpace(2 * sizeof(uint32_t));
+ as_lui(dest, Imm16::upper(imm).encode());
+ as_ori(dest, dest, Imm16::lower(imm).encode());
+}
+
+void
+MacroAssemblerMIPS::ma_liPatchable(Register dest, ImmPtr imm)
+{
+ return ma_liPatchable(dest, Imm32(int32_t(imm.value)));
+}
+
+// Shifts
+void
+MacroAssemblerMIPS::ma_sll(Register rd, Register rt, Imm32 shift)
+{
+ as_sll(rd, rt, shift.value % 32);
+}
+void
+MacroAssemblerMIPS::ma_srl(Register rd, Register rt, Imm32 shift)
+{
+ as_srl(rd, rt, shift.value % 32);
+}
+
+void
+MacroAssemblerMIPS::ma_sra(Register rd, Register rt, Imm32 shift)
+{
+ as_sra(rd, rt, shift.value % 32);
+}
+
+void
+MacroAssemblerMIPS::ma_ror(Register rd, Register rt, Imm32 shift)
+{
+ as_rotr(rd, rt, shift.value % 32);
+}
+
+void
+MacroAssemblerMIPS::ma_rol(Register rd, Register rt, Imm32 shift)
+{
+ as_rotr(rd, rt, 32 - (shift.value % 32));
+}
+
+void
+MacroAssemblerMIPS::ma_sll(Register rd, Register rt, Register shift)
+{
+ as_sllv(rd, rt, shift);
+}
+
+void
+MacroAssemblerMIPS::ma_srl(Register rd, Register rt, Register shift)
+{
+ as_srlv(rd, rt, shift);
+}
+
+void
+MacroAssemblerMIPS::ma_sra(Register rd, Register rt, Register shift)
+{
+ as_srav(rd, rt, shift);
+}
+
+void
+MacroAssemblerMIPS::ma_ror(Register rd, Register rt, Register shift)
+{
+ as_rotrv(rd, rt, shift);
+}
+
+void
+MacroAssemblerMIPS::ma_rol(Register rd, Register rt, Register shift)
+{
+ ma_negu(ScratchRegister, shift);
+ as_rotrv(rd, rt, ScratchRegister);
+}
+
+void
+MacroAssemblerMIPS::ma_negu(Register rd, Register rs)
+{
+ as_subu(rd, zero, rs);
+}
+
+void
+MacroAssemblerMIPS::ma_not(Register rd, Register rs)
+{
+ as_nor(rd, rs, zero);
+}
+
+// And.
+void
+MacroAssemblerMIPS::ma_and(Register rd, Register rs)
+{
+ as_and(rd, rd, rs);
+}
+
+void
+MacroAssemblerMIPS::ma_and(Register rd, Register rs, Register rt)
+{
+ as_and(rd, rs, rt);
+}
+
+void
+MacroAssemblerMIPS::ma_and(Register rd, Imm32 imm)
+{
+ ma_and(rd, rd, imm);
+}
+
+void
+MacroAssemblerMIPS::ma_and(Register rd, Register rs, Imm32 imm)
+{
+ if (Imm16::isInUnsignedRange(imm.value)) {
+ as_andi(rd, rs, imm.value);
+ } else {
+ ma_li(ScratchRegister, imm);
+ as_and(rd, rs, ScratchRegister);
+ }
+}
+
+// Or.
+void
+MacroAssemblerMIPS::ma_or(Register rd, Register rs)
+{
+ as_or(rd, rd, rs);
+}
+
+void
+MacroAssemblerMIPS::ma_or(Register rd, Register rs, Register rt)
+{
+ as_or(rd, rs, rt);
+}
+
+void
+MacroAssemblerMIPS::ma_or(Register rd, Imm32 imm)
+{
+ ma_or(rd, rd, imm);
+}
+
+void
+MacroAssemblerMIPS::ma_or(Register rd, Register rs, Imm32 imm)
+{
+ if (Imm16::isInUnsignedRange(imm.value)) {
+ as_ori(rd, rs, imm.value);
+ } else {
+ ma_li(ScratchRegister, imm);
+ as_or(rd, rs, ScratchRegister);
+ }
+}
+
+// xor
+void
+MacroAssemblerMIPS::ma_xor(Register rd, Register rs)
+{
+ as_xor(rd, rd, rs);
+}
+
+void
+MacroAssemblerMIPS::ma_xor(Register rd, Register rs, Register rt)
+{
+ as_xor(rd, rs, rt);
+}
+
+void
+MacroAssemblerMIPS::ma_xor(Register rd, Imm32 imm)
+{
+ ma_xor(rd, rd, imm);
+}
+
+void
+MacroAssemblerMIPS::ma_xor(Register rd, Register rs, Imm32 imm)
+{
+ if (Imm16::isInUnsignedRange(imm.value)) {
+ as_xori(rd, rs, imm.value);
+ } else {
+ ma_li(ScratchRegister, imm);
+ as_xor(rd, rs, ScratchRegister);
+ }
+}
+
+// Arithmetic-based ops.
+
+// Add.
+void
+MacroAssemblerMIPS::ma_addu(Register rd, Register rs, Imm32 imm)
+{
+ if (Imm16::isInSignedRange(imm.value)) {
+ as_addiu(rd, rs, imm.value);
+ } else {
+ ma_li(ScratchRegister, imm);
+ as_addu(rd, rs, ScratchRegister);
+ }
+}
+
+void
+MacroAssemblerMIPS::ma_addu(Register rd, Register rs)
+{
+ as_addu(rd, rd, rs);
+}
+
+void
+MacroAssemblerMIPS::ma_addu(Register rd, Imm32 imm)
+{
+ ma_addu(rd, rd, imm);
+}
+
+void
+MacroAssemblerMIPS::ma_addTestOverflow(Register rd, Register rs, Register rt, Label *overflow)
+{
+ Label goodAddition;
+ as_addu(SecondScratchReg, rs, rt);
+
+ as_xor(ScratchRegister, rs, rt); // If different sign, no overflow
+ ma_b(ScratchRegister, Imm32(0), &goodAddition, Assembler::LessThan, ShortJump);
+
+ // If different sign, then overflow
+ as_xor(ScratchRegister, rs, SecondScratchReg);
+ ma_b(ScratchRegister, Imm32(0), overflow, Assembler::LessThan);
+
+ bind(&goodAddition);
+ ma_move(rd, SecondScratchReg);
+}
+
+void
+MacroAssemblerMIPS::ma_addTestOverflow(Register rd, Register rs, Imm32 imm, Label *overflow)
+{
+ // Check for signed range because of as_addiu
+ // Check for unsigned range because of as_xori
+ if (Imm16::isInSignedRange(imm.value) && Imm16::isInUnsignedRange(imm.value)) {
+ Label goodAddition;
+ as_addiu(SecondScratchReg, rs, imm.value);
+
+ // If different sign, no overflow
+ as_xori(ScratchRegister, rs, imm.value);
+ ma_b(ScratchRegister, Imm32(0), &goodAddition, Assembler::LessThan, ShortJump);
+
+ // If different sign, then overflow
+ as_xor(ScratchRegister, rs, SecondScratchReg);
+ ma_b(ScratchRegister, Imm32(0), overflow, Assembler::LessThan);
+
+ bind(&goodAddition);
+ ma_move(rd, SecondScratchReg);
+ } else {
+ ma_li(ScratchRegister, imm);
+ ma_addTestOverflow(rd, rs, ScratchRegister, overflow);
+ }
+}
+
+// Subtract.
+void
+MacroAssemblerMIPS::ma_subu(Register rd, Register rs, Register rt)
+{
+ as_subu(rd, rs, rt);
+}
+
+void
+MacroAssemblerMIPS::ma_subu(Register rd, Register rs, Imm32 imm)
+{
+ if (Imm16::isInSignedRange(-imm.value)) {
+ as_addiu(rd, rs, -imm.value);
+ } else {
+ ma_li(ScratchRegister, imm);
+ as_subu(rd, rs, ScratchRegister);
+ }
+}
+
+void
+MacroAssemblerMIPS::ma_subu(Register rd, Imm32 imm)
+{
+ ma_subu(rd, rd, imm);
+}
+
+void
+MacroAssemblerMIPS::ma_subTestOverflow(Register rd, Register rs, Register rt, Label *overflow)
+{
+ Label goodSubtraction;
+ // Use second scratch. The instructions generated by ma_b don't use the
+ // second scratch register.
+ ma_subu(SecondScratchReg, rs, rt);
+
+ as_xor(ScratchRegister, rs, rt); // If same sign, no overflow
+ ma_b(ScratchRegister, Imm32(0), &goodSubtraction, Assembler::GreaterThanOrEqual, ShortJump);
+
+ // If different sign, then overflow
+ as_xor(ScratchRegister, rs, SecondScratchReg);
+ ma_b(ScratchRegister, Imm32(0), overflow, Assembler::LessThan);
+
+ bind(&goodSubtraction);
+ ma_move(rd, SecondScratchReg);
+}
+
+void
+MacroAssemblerMIPS::ma_subTestOverflow(Register rd, Register rs, Imm32 imm, Label *overflow)
+{
+ if (imm.value != INT32_MIN) {
+ ma_addTestOverflow(rd, rs, Imm32(-imm.value), overflow);
+ } else {
+ ma_li(ScratchRegister, Imm32(imm.value));
+ ma_subTestOverflow(rd, rs, ScratchRegister, overflow);
+ }
+}
+
+void
+MacroAssemblerMIPS::ma_mult(Register rs, Imm32 imm)
+{
+ ma_li(ScratchRegister, imm);
+ as_mult(rs, ScratchRegister);
+}
+
+void
+MacroAssemblerMIPS::ma_mul_branch_overflow(Register rd, Register rs, Register rt, Label *overflow)
+{
+ as_mult(rs, rt);
+ as_mflo(rd);
+ as_sra(ScratchRegister, rd, 31);
+ as_mfhi(SecondScratchReg);
+ ma_b(ScratchRegister, SecondScratchReg, overflow, Assembler::NotEqual);
+}
+
+void
+MacroAssemblerMIPS::ma_mul_branch_overflow(Register rd, Register rs, Imm32 imm, Label *overflow)
+{
+ ma_li(ScratchRegister, imm);
+ ma_mul_branch_overflow(rd, rs, ScratchRegister, overflow);
+}
+
+void
+MacroAssemblerMIPS::ma_div_branch_overflow(Register rd, Register rs, Register rt, Label *overflow)
+{
+ as_div(rs, rt);
+ as_mflo(rd);
+ as_mfhi(ScratchRegister);
+ ma_b(ScratchRegister, ScratchRegister, overflow, Assembler::NonZero);
+}
+
+void
+MacroAssemblerMIPS::ma_div_branch_overflow(Register rd, Register rs, Imm32 imm, Label *overflow)
+{
+ ma_li(ScratchRegister, imm);
+ ma_div_branch_overflow(rd, rs, ScratchRegister, overflow);
+}
+
+void
+MacroAssemblerMIPS::ma_mod_mask(Register src, Register dest, Register hold, int32_t shift,
+ Label *negZero)
+{
+    // MATH:
+    // We wish to compute x % ((1<<y) - 1) for a known constant, y.
+ // First, let b = (1<<y) and C = (1<<y)-1, then think of the 32 bit
+ // dividend as a number in base b, namely
+ // c_0*1 + c_1*b + c_2*b^2 ... c_n*b^n
+ // now, since both addition and multiplication commute with modulus,
+ // x % C == (c_0 + c_1*b + ... + c_n*b^n) % C ==
+ // (c_0 % C) + (c_1%C) * (b % C) + (c_2 % C) * (b^2 % C)...
+ // now, since b == C + 1, b % C == 1, and b^n % C == 1
+ // this means that the whole thing simplifies to:
+ // c_0 + c_1 + c_2 ... c_n % C
+ // each c_n can easily be computed by a shift/bitextract, and the modulus
+ // can be maintained by simply subtracting by C whenever the number gets
+ // over C.
+ int32_t mask = (1 << shift) - 1;
+ Label head, negative, sumSigned, done;
+
+ // hold holds -1 if the value was negative, 1 otherwise.
+ // ScratchRegister holds the remaining bits that have not been processed
+ // lr serves as a temporary location to store extracted bits into as well
+ // as holding the trial subtraction as a temp value dest is the
+ // accumulator (and holds the final result)
+
+    // move the whole value into the scratch register, setting the condition
+    // codes so we can muck with them later.
+ ma_move(ScratchRegister, src);
+ // Zero out the dest.
+ ma_subu(dest, dest, dest);
+ // Set the hold appropriately.
+ ma_b(ScratchRegister, ScratchRegister, &negative, Signed, ShortJump);
+ ma_li(hold, Imm32(1));
+ ma_b(&head, ShortJump);
+
+ bind(&negative);
+ ma_li(hold, Imm32(-1));
+ ma_negu(ScratchRegister, ScratchRegister);
+
+ // Begin the main loop.
+ bind(&head);
+
+ // Extract the bottom bits into lr.
+ ma_and(SecondScratchReg, ScratchRegister, Imm32(mask));
+ // Add those bits to the accumulator.
+ as_addu(dest, dest, SecondScratchReg);
+ // Do a trial subtraction, this is the same operation as cmp, but we
+ // store the dest
+ ma_subu(SecondScratchReg, dest, Imm32(mask));
+ // If (sum - C) > 0, store sum - C back into sum, thus performing a
+ // modulus.
+ ma_b(SecondScratchReg, SecondScratchReg, &sumSigned, Signed, ShortJump);
+ ma_move(dest, SecondScratchReg);
+ bind(&sumSigned);
+ // Get rid of the bits that we extracted before.
+ as_srl(ScratchRegister, ScratchRegister, shift);
+ // If the shift produced zero, finish, otherwise, continue in the loop.
+ ma_b(ScratchRegister, ScratchRegister, &head, NonZero, ShortJump);
+ // Check the hold to see if we need to negate the result.
+ ma_b(hold, hold, &done, NotSigned, ShortJump);
+
+ // If the hold was non-zero, negate the result to be in line with
+ // what JS wants
+ if (negZero != nullptr) {
+ // Jump out in case of negative zero.
+ ma_b(hold, hold, negZero, Zero);
+ ma_negu(dest, dest);
+ } else {
+ ma_negu(dest, dest);
+ }
+
+ bind(&done);
+}
+
+// Memory.
+
+// Load a byte, halfword or word from 'address' into 'dest'. For sub-word
+// sizes, 'extension' selects zero- or sign-extension of the loaded value.
+void
+MacroAssemblerMIPS::ma_load(const Register &dest, Address address,
+ LoadStoreSize size, LoadStoreExtension extension)
+{
+ int16_t encodedOffset;
+ Register base;
+ // Offsets that do not fit the signed 16-bit immediate field are folded
+ // into ScratchRegister, which then serves as the effective base.
+ if (!Imm16::isInSignedRange(address.offset)) {
+ ma_li(ScratchRegister, Imm32(address.offset));
+ as_addu(ScratchRegister, address.base, ScratchRegister);
+ base = ScratchRegister;
+ encodedOffset = Imm16(0).encode();
+ } else {
+ encodedOffset = Imm16(address.offset).encode();
+ base = address.base;
+ }
+
+ switch (size) {
+ case SizeByte:
+ if (ZeroExtend == extension)
+ as_lbu(dest, base, encodedOffset);
+ else
+ as_lb(dest, base, encodedOffset);
+ break;
+ case SizeHalfWord:
+ if (ZeroExtend == extension)
+ as_lhu(dest, base, encodedOffset);
+ else
+ as_lh(dest, base, encodedOffset);
+ break;
+ case SizeWord:
+ as_lw(dest, base, encodedOffset);
+ break;
+ default:
+ MOZ_ASSUME_UNREACHABLE("Invalid argument for ma_load");
+ break;
+ }
+}
+
+// BaseIndex form: resolve base + scaled index into SecondScratchReg first.
+void
+MacroAssemblerMIPS::ma_load(const Register &dest, const BaseIndex &src,
+ LoadStoreSize size, LoadStoreExtension extension)
+{
+ computeScaledAddress(src, SecondScratchReg);
+ ma_load(dest, Address(SecondScratchReg, src.offset), size, extension);
+}
+
+// Store the low byte/halfword/word of 'data' to 'address'. 'extension' is
+// unused for stores; it is kept for symmetry with ma_load.
+void
+MacroAssemblerMIPS::ma_store(const Register &data, Address address, LoadStoreSize size,
+ LoadStoreExtension extension)
+{
+ int16_t encodedOffset;
+ Register base;
+ // Same wide-offset folding as in ma_load above.
+ if (!Imm16::isInSignedRange(address.offset)) {
+ ma_li(ScratchRegister, Imm32(address.offset));
+ as_addu(ScratchRegister, address.base, ScratchRegister);
+ base = ScratchRegister;
+ encodedOffset = Imm16(0).encode();
+ } else {
+ encodedOffset = Imm16(address.offset).encode();
+ base = address.base;
+ }
+
+ switch (size) {
+ case SizeByte:
+ as_sb(data, base, encodedOffset);
+ break;
+ case SizeHalfWord:
+ as_sh(data, base, encodedOffset);
+ break;
+ case SizeWord:
+ as_sw(data, base, encodedOffset);
+ break;
+ default:
+ MOZ_ASSUME_UNREACHABLE("Invalid argument for ma_store");
+ break;
+ }
+}
+
+void
+MacroAssemblerMIPS::ma_store(const Register &data, const BaseIndex &dest,
+ LoadStoreSize size, LoadStoreExtension extension)
+{
+ computeScaledAddress(dest, SecondScratchReg);
+ ma_store(data, Address(SecondScratchReg, dest.offset), size, extension);
+}
+
+void
+MacroAssemblerMIPS::ma_store(const Imm32 &imm, const BaseIndex &dest,
+ LoadStoreSize size, LoadStoreExtension extension)
+{
+ // Make sure that SecondScratchReg contains absolute address so that
+ // offset is 0.
+ computeEffectiveAddress(dest, SecondScratchReg);
+
+ // Scratch register is free now; use it for loading the imm value.
+ ma_li(ScratchRegister, imm);
+
+ // With offset=0, ScratchRegister will not be used inside ma_store(),
+ // so we can use it as a parameter here.
+ ma_store(ScratchRegister, Address(SecondScratchReg, 0), size, extension);
+}
+
+// Compute address.base + (address.index << scale) into 'dest'. Note that
+// the constant displacement (address.offset) is NOT added here.
+void
+MacroAssemblerMIPS::computeScaledAddress(const BaseIndex &address, Register dest)
+{
+ int32_t shift = Imm32::ShiftOf(address.scale).value;
+ if (shift) {
+ ma_sll(dest, address.index, Imm32(shift));
+ as_addu(dest, address.base, dest);
+ } else {
+ as_addu(dest, address.base, address.index);
+ }
+}
+
+// Shortcut for when we know we're transferring 32 bits of data.
+void
+MacroAssemblerMIPS::ma_lw(Register data, Address address)
+{
+ ma_load(data, address, SizeWord);
+}
+
+void
+MacroAssemblerMIPS::ma_sw(Register data, Address address)
+{
+ ma_store(data, address, SizeWord);
+}
+
+// Store an immediate word; the value is materialized in ScratchRegister.
+void
+MacroAssemblerMIPS::ma_sw(Imm32 imm, Address address)
+{
+ MOZ_ASSERT(address.base != ScratchRegister);
+ ma_li(ScratchRegister, imm);
+
+ if (Imm16::isInSignedRange(address.offset)) {
+ as_sw(ScratchRegister, address.base, Imm16(address.offset).encode());
+ } else {
+ MOZ_ASSERT(address.base != SecondScratchReg);
+
+ // Wide offset: fold it into SecondScratchReg and store at offset 0.
+ ma_li(SecondScratchReg, Imm32(address.offset));
+ as_addu(SecondScratchReg, address.base, SecondScratchReg);
+ as_sw(ScratchRegister, SecondScratchReg, 0);
+ }
+}
+
+// Pop one word from the stack into 'r'.
+void
+MacroAssemblerMIPS::ma_pop(Register r)
+{
+ as_lw(r, StackPointer, 0);
+ as_addiu(StackPointer, StackPointer, sizeof(intptr_t));
+}
+
+// Push 'r' onto the stack. Pushing sp itself stores its pre-decrement value.
+void
+MacroAssemblerMIPS::ma_push(Register r)
+{
+ if (r == sp) {
+ // Pushing sp requires one more instruction.
+ ma_move(ScratchRegister, sp);
+ r = ScratchRegister;
+ }
+
+ as_addiu(StackPointer, StackPointer, -sizeof(intptr_t));
+ as_sw(r, StackPointer, 0);
+}
+
+// Branches when done from within mips-specific code.
+void
+MacroAssemblerMIPS::ma_b(Register lhs, Register rhs, Label *label, Condition c, JumpKind jumpKind)
+{
+ switch (c) {
+ case Equal :
+ case NotEqual:
+ branchWithCode(getBranchCode(lhs, rhs, c), label, jumpKind);
+ break;
+ case Always:
+ ma_b(label, jumpKind);
+ break;
+ case Zero:
+ case NonZero:
+ case Signed:
+ case NotSigned:
+ MOZ_ASSERT(lhs == rhs);
+ branchWithCode(getBranchCode(lhs, c), label, jumpKind);
+ break;
+ default:
+ // Remaining conditions need a slt/sltu compare into ScratchRegister
+ // first; ma_cmp returns the condition to test that result with.
+ Condition cond = ma_cmp(ScratchRegister, lhs, rhs, c);
+ branchWithCode(getBranchCode(ScratchRegister, cond), label, jumpKind);
+ break;
+ }
+}
+
+void
+MacroAssemblerMIPS::ma_b(Register lhs, Imm32 imm, Label *label, Condition c, JumpKind jumpKind)
+{
+ MOZ_ASSERT(c != Overflow);
+ if (imm.value == 0) {
+ // Unsigned comparisons against zero collapse to constants:
+ // x >= 0u is always true, x < 0u is always false.
+ if (c == Always || c == AboveOrEqual)
+ ma_b(label, jumpKind);
+ else if (c == Below)
+ ; // This condition is always false. No branch required.
+ else
+ branchWithCode(getBranchCode(lhs, c), label, jumpKind);
+ } else {
+ MOZ_ASSERT(lhs != ScratchRegister);
+ ma_li(ScratchRegister, imm);
+ ma_b(lhs, ScratchRegister, label, c, jumpKind);
+ }
+}
+
+void
+MacroAssemblerMIPS::ma_b(Register lhs, Address addr, Label *label, Condition c, JumpKind jumpKind)
+{
+ MOZ_ASSERT(lhs != ScratchRegister);
+ ma_lw(ScratchRegister, addr);
+ ma_b(lhs, ScratchRegister, label, c, jumpKind);
+}
+
+void
+MacroAssemblerMIPS::ma_b(Address addr, Imm32 imm, Label *label, Condition c, JumpKind jumpKind)
+{
+ ma_lw(SecondScratchReg, addr);
+ ma_b(SecondScratchReg, imm, label, c, jumpKind);
+}
+
+// Unconditional branch.
+void
+MacroAssemblerMIPS::ma_b(Label *label, JumpKind jumpKind)
+{
+ branchWithCode(getBranchCode(BranchIsJump), label, jumpKind);
+}
+
+// Unconditional branch-and-link (call).
+void
+MacroAssemblerMIPS::ma_bal(Label *label, JumpKind jumpKind)
+{
+ branchWithCode(getBranchCode(BranchIsCall), label, jumpKind);
+}
+
+// Emit the branch described by 'code' targeting 'label'. Bound labels get
+// either a short PC-relative branch or a long (load-address + jr/jalr)
+// sequence; unbound labels are linked into the label's use chain, and for
+// long jumps extra nops reserve space so the branch can be patched later.
+void
+MacroAssemblerMIPS::branchWithCode(InstImm code, Label *label, JumpKind jumpKind)
+{
+ InstImm inst_bgezal = InstImm(op_regimm, zero, rt_bgezal, BOffImm16(0));
+ InstImm inst_beq = InstImm(op_beq, zero, zero, BOffImm16(0));
+
+ if (label->bound()) {
+ int32_t offset = label->offset() - m_buffer.nextOffset().getOffset();
+
+ if (BOffImm16::isInRange(offset))
+ jumpKind = ShortJump;
+
+ if (jumpKind == ShortJump) {
+ MOZ_ASSERT(BOffImm16::isInRange(offset));
+ code.setBOffImm16(BOffImm16(offset));
+ writeInst(code.encode());
+ as_nop();
+ return;
+ }
+
+ // Generate long jump because target is out of range of short jump.
+ if (code.encode() == inst_bgezal.encode()) {
+ // Handle long call
+ addLongJump(nextOffset());
+ ma_liPatchable(ScratchRegister, Imm32(label->offset()));
+ as_jalr(ScratchRegister);
+ as_nop();
+ return;
+ }
+ if (code.encode() == inst_beq.encode()) {
+ // Handle long jump
+ addLongJump(nextOffset());
+ ma_liPatchable(ScratchRegister, Imm32(label->offset()));
+ as_jr(ScratchRegister);
+ as_nop();
+ return;
+ }
+
+ // Handle long conditional branch
+ writeInst(invertBranch(code, BOffImm16(5 * sizeof(uint32_t))).encode());
+ // No need for a "nop" here because we can clobber scratch.
+ addLongJump(nextOffset());
+ ma_liPatchable(ScratchRegister, Imm32(label->offset()));
+ as_jr(ScratchRegister);
+ as_nop();
+ return;
+ }
+
+ // Generate open jump and link it to a label.
+
+ // Second word holds a pointer to the next branch in label's chain.
+ uint32_t nextInChain = label->used() ? label->offset() : LabelBase::INVALID_OFFSET;
+
+ if (jumpKind == ShortJump) {
+ // Make the whole branch continuous in the buffer.
+ m_buffer.ensureSpace(2 * sizeof(uint32_t));
+
+ // Indicate that this is short jump with offset 4.
+ code.setBOffImm16(BOffImm16(4));
+ BufferOffset bo = writeInst(code.encode());
+ writeInst(nextInChain);
+ label->use(bo.getOffset());
+ return;
+ }
+
+ bool conditional = (code.encode() != inst_bgezal.encode() &&
+ code.encode() != inst_beq.encode());
+
+ // Make the whole branch continuous in the buffer.
+ m_buffer.ensureSpace((conditional ? 5 : 4) * sizeof(uint32_t));
+
+ BufferOffset bo = writeInst(code.encode());
+ writeInst(nextInChain);
+ label->use(bo.getOffset());
+ // Leave space for potential long jump.
+ as_nop();
+ as_nop();
+ if (conditional)
+ as_nop();
+}
+
+// Materialize comparison 'c' of lhs vs. rhs into 'scratch' using slt/sltu
+// (MIPS has no condition flags), and return the Equal/NotEqual condition
+// the caller must then test 'scratch' against zero with.
+Assembler::Condition
+MacroAssemblerMIPS::ma_cmp(Register scratch, Register lhs, Register rhs, Condition c)
+{
+ switch (c) {
+ case Above:
+ // bgtu s,t,label =>
+ // sltu at,t,s
+ // bne at,$zero,offs
+ as_sltu(scratch, rhs, lhs);
+ return NotEqual;
+ case AboveOrEqual:
+ // bgeu s,t,label =>
+ // sltu at,s,t
+ // beq at,$zero,offs
+ as_sltu(scratch, lhs, rhs);
+ return Equal;
+ case Below:
+ // bltu s,t,label =>
+ // sltu at,s,t
+ // bne at,$zero,offs
+ as_sltu(scratch, lhs, rhs);
+ return NotEqual;
+ case BelowOrEqual:
+ // bleu s,t,label =>
+ // sltu at,t,s
+ // beq at,$zero,offs
+ as_sltu(scratch, rhs, lhs);
+ return Equal;
+ case GreaterThan:
+ // bgt s,t,label =>
+ // slt at,t,s
+ // bne at,$zero,offs
+ as_slt(scratch, rhs, lhs);
+ return NotEqual;
+ case GreaterThanOrEqual:
+ // bge s,t,label =>
+ // slt at,s,t
+ // beq at,$zero,offs
+ as_slt(scratch, lhs, rhs);
+ return Equal;
+ case LessThan:
+ // blt s,t,label =>
+ // slt at,s,t
+ // bne at,$zero,offs
+ as_slt(scratch, lhs, rhs);
+ return NotEqual;
+ case LessThanOrEqual:
+ // ble s,t,label =>
+ // slt at,t,s
+ // beq at,$zero,offs
+ as_slt(scratch, rhs, lhs);
+ return Equal;
+ case Equal :
+ case NotEqual:
+ case Zero:
+ case NonZero:
+ case Always:
+ case Signed:
+ case NotSigned:
+ MOZ_ASSUME_UNREACHABLE("There is a better way to compare for equality.");
+ break;
+ case Overflow:
+ MOZ_ASSUME_UNREACHABLE("Overflow condition not supported for MIPS.");
+ break;
+ default:
+ MOZ_ASSUME_UNREACHABLE("Invalid condition for branch.");
+ }
+ return Always;
+}
+
+// Set rd to 1 if comparison 'c' of rs and rt holds, otherwise 0. Each case
+// materializes the boolean with slt/sltu/xor sequences since MIPS has no
+// condition flags.
+void
+MacroAssemblerMIPS::ma_cmp_set(Register rd, Register rs, Register rt, Condition c)
+{
+ switch (c) {
+ case Equal :
+ // seq d,s,t =>
+ // xor d,s,t
+ // sltiu d,d,1
+ as_xor(rd, rs, rt);
+ as_sltiu(rd, rd, 1);
+ break;
+ case NotEqual:
+ // sne d,s,t =>
+ // xor d,s,t
+ // sltu d,$zero,d
+ as_xor(rd, rs, rt);
+ as_sltu(rd, zero, rd);
+ break;
+ case Above:
+ // sgtu d,s,t =>
+ // sltu d,t,s
+ as_sltu(rd, rt, rs);
+ break;
+ case AboveOrEqual:
+ // sgeu d,s,t =>
+ // sltu d,s,t
+ // xori d,d,1
+ as_sltu(rd, rs, rt);
+ as_xori(rd, rd, 1);
+ break;
+ case Below:
+ // sltu d,s,t
+ as_sltu(rd, rs, rt);
+ break;
+ case BelowOrEqual:
+ // sleu d,s,t =>
+ // sltu d,t,s
+ // xori d,d,1
+ as_sltu(rd, rt, rs);
+ as_xori(rd, rd, 1);
+ break;
+ case GreaterThan:
+ // sgt d,s,t =>
+ // slt d,t,s
+ as_slt(rd, rt, rs);
+ break;
+ case GreaterThanOrEqual:
+ // sge d,s,t =>
+ // slt d,s,t
+ // xori d,d,1
+ as_slt(rd, rs, rt);
+ as_xori(rd, rd, 1);
+ break;
+ case LessThan:
+ // slt d,s,t
+ as_slt(rd, rs, rt);
+ break;
+ case LessThanOrEqual:
+ // sle d,s,t =>
+ // slt d,t,s
+ // xori d,d,1
+ as_slt(rd, rt, rs);
+ as_xori(rd, rd, 1);
+ break;
+ case Zero:
+ MOZ_ASSERT(rs == rt);
+ // seq d,s,$zero =>
+ // xor d,s,$zero
+ // sltiu d,d,1
+ as_xor(rd, rs, zero);
+ as_sltiu(rd, rd, 1);
+ break;
+ case NonZero:
+ // sne d,s,$zero =>
+ // xor d,s,$zero
+ // sltu d,$zero,d
+ as_xor(rd, rs, zero);
+ as_sltu(rd, zero, rd);
+ break;
+ case Signed:
+ as_slt(rd, rs, zero);
+ break;
+ case NotSigned:
+ // sge d,s,$zero =>
+ // slt d,s,$zero
+ // xori d,d,1
+ as_slt(rd, rs, zero);
+ as_xori(rd, rd, 1);
+ break;
+ default:
+ MOZ_ASSUME_UNREACHABLE("Invalid condition for ma_cmp_set.");
+ break;
+ }
+}
+
+// Emit the FP compare realizing DoubleCondition 'c' on lhs/rhs in format
+// 'fmt', setting FP condition bit 'fcc'. *testKind reports whether the
+// caller must act when the bit is set (TestForTrue) or clear
+// (TestForFalse); ordered/unordered variants are expressed by testing the
+// complementary c.cond predicate for false.
+void
+MacroAssemblerMIPS::compareFloatingPoint(FloatFormat fmt, FloatRegister lhs, FloatRegister rhs,
+ DoubleCondition c, FloatTestKind *testKind,
+ FPConditionBit fcc)
+{
+ switch (c) {
+ case DoubleOrdered:
+ as_cun(fmt, lhs, rhs, fcc);
+ *testKind = TestForFalse;
+ break;
+ case DoubleEqual:
+ as_ceq(fmt, lhs, rhs, fcc);
+ *testKind = TestForTrue;
+ break;
+ case DoubleNotEqual:
+ as_cueq(fmt, lhs, rhs, fcc);
+ *testKind = TestForFalse;
+ break;
+ case DoubleGreaterThan:
+ as_colt(fmt, rhs, lhs, fcc);
+ *testKind = TestForTrue;
+ break;
+ case DoubleGreaterThanOrEqual:
+ as_cole(fmt, rhs, lhs, fcc);
+ *testKind = TestForTrue;
+ break;
+ case DoubleLessThan:
+ as_colt(fmt, lhs, rhs, fcc);
+ *testKind = TestForTrue;
+ break;
+ case DoubleLessThanOrEqual:
+ as_cole(fmt, lhs, rhs, fcc);
+ *testKind = TestForTrue;
+ break;
+ case DoubleUnordered:
+ as_cun(fmt, lhs, rhs, fcc);
+ *testKind = TestForTrue;
+ break;
+ case DoubleEqualOrUnordered:
+ as_cueq(fmt, lhs, rhs, fcc);
+ *testKind = TestForTrue;
+ break;
+ case DoubleNotEqualOrUnordered:
+ as_ceq(fmt, lhs, rhs, fcc);
+ *testKind = TestForFalse;
+ break;
+ case DoubleGreaterThanOrUnordered:
+ as_cult(fmt, rhs, lhs, fcc);
+ *testKind = TestForTrue;
+ break;
+ case DoubleGreaterThanOrEqualOrUnordered:
+ as_cule(fmt, rhs, lhs, fcc);
+ *testKind = TestForTrue;
+ break;
+ case DoubleLessThanOrUnordered:
+ as_cult(fmt, lhs, rhs, fcc);
+ *testKind = TestForTrue;
+ break;
+ case DoubleLessThanOrEqualOrUnordered:
+ as_cule(fmt, lhs, rhs, fcc);
+ *testKind = TestForTrue;
+ break;
+ default:
+ MOZ_ASSUME_UNREACHABLE("Invalid DoubleCondition.");
+ break;
+ }
+}
+
+// Set dest to 0/1 for a double comparison, via a conditional move keyed on
+// the FP condition bit set by compareFloatingPoint.
+void
+MacroAssemblerMIPS::ma_cmp_set_double(Register dest, FloatRegister lhs, FloatRegister rhs,
+ DoubleCondition c)
+{
+ ma_li(dest, Imm32(0));
+ ma_li(ScratchRegister, Imm32(1));
+
+ FloatTestKind moveCondition;
+ compareFloatingPoint(DoubleFloat, lhs, rhs, c, &moveCondition);
+
+ if (moveCondition == TestForTrue)
+ as_movt(dest, ScratchRegister);
+ else
+ as_movf(dest, ScratchRegister);
+}
+
+// Single-precision counterpart of ma_cmp_set_double.
+void
+MacroAssemblerMIPS::ma_cmp_set_float32(Register dest, FloatRegister lhs, FloatRegister rhs,
+ DoubleCondition c)
+{
+ ma_li(dest, Imm32(0));
+ ma_li(ScratchRegister, Imm32(1));
+
+ FloatTestKind moveCondition;
+ compareFloatingPoint(SingleFloat, lhs, rhs, c, &moveCondition);
+
+ if (moveCondition == TestForTrue)
+ as_movt(dest, ScratchRegister);
+ else
+ as_movf(dest, ScratchRegister);
+}
+
+void
+MacroAssemblerMIPS::ma_cmp_set(Register rd, Register rs, Imm32 imm, Condition c)
+{
+ ma_li(ScratchRegister, imm);
+ ma_cmp_set(rd, rs, ScratchRegister, c);
+}
+
+void
+MacroAssemblerMIPS::ma_cmp_set(Register rd, Register rs, Address addr, Condition c)
+{
+ ma_lw(ScratchRegister, addr);
+ ma_cmp_set(rd, rs, ScratchRegister, c);
+}
+
+void
+MacroAssemblerMIPS::ma_cmp_set(Register dst, Address lhs, Register rhs, Condition c)
+{
+ ma_lw(ScratchRegister, lhs);
+ ma_cmp_set(dst, ScratchRegister, rhs, c);
+}
+
+// fp instructions
+// Load a float32 immediate by materializing its bit pattern in a GPR.
+void
+MacroAssemblerMIPS::ma_lis(FloatRegister dest, float value)
+{
+ Imm32 imm(mozilla::BitwiseCast<uint32_t>(value));
+
+ ma_li(ScratchRegister, imm);
+ moveToFloat32(ScratchRegister, dest);
+}
+
+// Load a double immediate word-by-word into the even/odd register pair.
+// NOTE(review): DoubleStruct's {lo, hi} ordering assumes a little-endian
+// word layout of the double -- confirm for the target.
+void
+MacroAssemblerMIPS::ma_lid(FloatRegister dest, double value)
+{
+ struct DoubleStruct {
+ uint32_t lo;
+ uint32_t hi;
+ } ;
+ DoubleStruct intStruct = mozilla::BitwiseCast<DoubleStruct>(value);
+
+ // put hi part of 64 bit value into the odd register
+ if (intStruct.hi == 0) {
+ moveToDoubleHi(zero, dest);
+ } else {
+ ma_li(ScratchRegister, Imm32(intStruct.hi));
+ moveToDoubleHi(ScratchRegister, dest);
+ }
+
+ // put low part of 64 bit value into the even register
+ if (intStruct.lo == 0) {
+ moveToDoubleLo(zero, dest);
+ } else {
+ ma_li(ScratchRegister, Imm32(intStruct.lo));
+ moveToDoubleLo(ScratchRegister, dest);
+ }
+}
+
+// Load -0.0: all bits clear except the sign bit (INT_MIN == 0x80000000).
+void
+MacroAssemblerMIPS::ma_liNegZero(FloatRegister dest)
+{
+ moveToDoubleLo(zero, dest);
+ ma_li(ScratchRegister, Imm32(INT_MIN));
+ moveToDoubleHi(ScratchRegister, dest);
+}
+
+// Move a boxed double out of 'src': payload from the low half, type tag
+// from the high half.
+void
+MacroAssemblerMIPS::ma_mv(FloatRegister src, ValueOperand dest)
+{
+ moveFromDoubleLo(src, dest.payloadReg());
+ moveFromDoubleHi(src, dest.typeReg());
+}
+
+void
+MacroAssemblerMIPS::ma_mv(ValueOperand src, FloatRegister dest)
+{
+ moveToDoubleLo(src.payloadReg(), dest);
+ moveToDoubleHi(src.typeReg(), dest);
+}
+
+// Load a single-precision float.
+void
+MacroAssemblerMIPS::ma_ls(FloatRegister ft, Address address)
+{
+ if (Imm16::isInSignedRange(address.offset)) {
+ as_ls(ft, address.base, Imm16(address.offset).encode());
+ } else {
+ MOZ_ASSERT(address.base != ScratchRegister);
+ ma_li(ScratchRegister, Imm32(address.offset));
+ as_addu(ScratchRegister, address.base, ScratchRegister);
+ as_ls(ft, ScratchRegister, 0);
+ }
+}
+
+// Load a double as two 32-bit halves into the even/odd register pair.
+void
+MacroAssemblerMIPS::ma_ld(FloatRegister ft, Address address)
+{
+ // Use single precision load instructions so we don't have to worry about
+ // alignment.
+
+ int32_t off2 = address.offset + TAG_OFFSET;
+ if (Imm16::isInSignedRange(address.offset) && Imm16::isInSignedRange(off2)) {
+ as_ls(ft, address.base, Imm16(address.offset).encode());
+ as_ls(getOddPair(ft), address.base, Imm16(off2).encode());
+ } else {
+ // NOTE(review): unlike ma_ls, there is no assert here that
+ // address.base != ScratchRegister.
+ ma_li(ScratchRegister, Imm32(address.offset));
+ as_addu(ScratchRegister, address.base, ScratchRegister);
+ as_ls(ft, ScratchRegister, PAYLOAD_OFFSET);
+ as_ls(getOddPair(ft), ScratchRegister, TAG_OFFSET);
+ }
+}
+
+// Store a double as two 32-bit halves, mirroring ma_ld above.
+void
+MacroAssemblerMIPS::ma_sd(FloatRegister ft, Address address)
+{
+ int32_t off2 = address.offset + TAG_OFFSET;
+ if (Imm16::isInSignedRange(address.offset) && Imm16::isInSignedRange(off2)) {
+ as_ss(ft, address.base, Imm16(address.offset).encode());
+ as_ss(getOddPair(ft), address.base, Imm16(off2).encode());
+ } else {
+ ma_li(ScratchRegister, Imm32(address.offset));
+ as_addu(ScratchRegister, address.base, ScratchRegister);
+ as_ss(ft, ScratchRegister, PAYLOAD_OFFSET);
+ as_ss(getOddPair(ft), ScratchRegister, TAG_OFFSET);
+ }
+}
+
+void
+MacroAssemblerMIPS::ma_sd(FloatRegister ft, BaseIndex address)
+{
+ computeScaledAddress(address, SecondScratchReg);
+ ma_sd(ft, Address(SecondScratchReg, address.offset));
+}
+
+// Store a single-precision float.
+void
+MacroAssemblerMIPS::ma_ss(FloatRegister ft, Address address)
+{
+ if (Imm16::isInSignedRange(address.offset)) {
+ as_ss(ft, address.base, Imm16(address.offset).encode());
+ } else {
+ ma_li(ScratchRegister, Imm32(address.offset));
+ as_addu(ScratchRegister, address.base, ScratchRegister);
+ as_ss(ft, ScratchRegister, 0);
+ }
+}
+
+void
+MacroAssemblerMIPS::ma_ss(FloatRegister ft, BaseIndex address)
+{
+ computeScaledAddress(address, SecondScratchReg);
+ ma_ss(ft, Address(SecondScratchReg, address.offset));
+}
+
+// Pop a double from the stack.
+void
+MacroAssemblerMIPS::ma_pop(FloatRegister fs)
+{
+ ma_ld(fs, Address(StackPointer, 0));
+ as_addiu(StackPointer, StackPointer, sizeof(double));
+}
+
+// Push a double onto the stack.
+void
+MacroAssemblerMIPS::ma_push(FloatRegister fs)
+{
+ as_addiu(StackPointer, StackPointer, -sizeof(double));
+ ma_sd(fs, Address(StackPointer, 0));
+}
+
+// Compare-and-branch on single-precision operands.
+void
+MacroAssemblerMIPS::ma_bc1s(FloatRegister lhs, FloatRegister rhs, Label *label,
+ DoubleCondition c, JumpKind jumpKind, FPConditionBit fcc)
+{
+ FloatTestKind testKind;
+ compareFloatingPoint(SingleFloat, lhs, rhs, c, &testKind, fcc);
+ branchWithCode(getBranchCode(testKind, fcc), label, jumpKind);
+}
+
+// Compare-and-branch on double-precision operands.
+void
+MacroAssemblerMIPS::ma_bc1d(FloatRegister lhs, FloatRegister rhs, Label *label,
+ DoubleCondition c, JumpKind jumpKind, FPConditionBit fcc)
+{
+ FloatTestKind testKind;
+ compareFloatingPoint(DoubleFloat, lhs, rhs, c, &testKind, fcc);
+ branchWithCode(getBranchCode(testKind, fcc), label, jumpKind);
+}
+
+// Build a fake exit frame: push a frame descriptor and the address of the
+// instruction following the pushes (via a CodeLabel), so the stack looks
+// like a call was just made. *offset receives the code offset of that point.
+bool
+MacroAssemblerMIPSCompat::buildFakeExitFrame(const Register &scratch, uint32_t *offset)
+{
+ mozilla::DebugOnly<uint32_t> initialDepth = framePushed();
+
+ CodeLabel cl;
+ ma_li(scratch, cl.dest());
+
+ uint32_t descriptor = MakeFrameDescriptor(framePushed(), JitFrame_IonJS);
+ Push(Imm32(descriptor));
+ Push(scratch);
+
+ bind(cl.src());
+ *offset = currentOffset();
+
+ MOZ_ASSERT(framePushed() == initialDepth + IonExitFrameLayout::Size());
+ return addCodeLabel(cl);
+}
+
+// Out-of-line variant: the pushed return address is a known pointer rather
+// than a label in the current code stream.
+bool
+MacroAssemblerMIPSCompat::buildOOLFakeExitFrame(void *fakeReturnAddr)
+{
+ DebugOnly<uint32_t> initialDepth = framePushed();
+ uint32_t descriptor = MakeFrameDescriptor(framePushed(), JitFrame_IonJS);
+
+ Push(Imm32(descriptor)); // descriptor_
+ Push(ImmPtr(fakeReturnAddr));
+
+ return true;
+}
+
+// Push a frame descriptor, then call 'target' through a patchable sequence
+// recorded as a pending JITCODE relocation.
+void
+MacroAssemblerMIPSCompat::callWithExitFrame(JitCode *target)
+{
+ uint32_t descriptor = MakeFrameDescriptor(framePushed(), JitFrame_IonJS);
+ Push(Imm32(descriptor)); // descriptor
+
+ addPendingJump(m_buffer.nextOffset(), ImmPtr(target->raw()), Relocation::JITCODE);
+ ma_liPatchable(ScratchRegister, ImmPtr(target->raw()));
+ ma_callIonHalfPush(ScratchRegister);
+}
+
+// As above, but the frame size is dynamic: dynStack is adjusted by the
+// statically-known pushed bytes and turned into the descriptor.
+void
+MacroAssemblerMIPSCompat::callWithExitFrame(JitCode *target, Register dynStack)
+{
+ ma_addu(dynStack, dynStack, Imm32(framePushed()));
+ makeFrameDescriptor(dynStack, JitFrame_IonJS);
+ Push(dynStack); // descriptor
+
+ addPendingJump(m_buffer.nextOffset(), ImmPtr(target->raw()), Relocation::JITCODE);
+ ma_liPatchable(ScratchRegister, ImmPtr(target->raw()));
+ ma_callIonHalfPush(ScratchRegister);
+}
+
+// Call Ion code; the two paths keep the frame size a multiple of 8 at the
+// call (presumably ma_callIonHalfPush pushes one extra word -- confirm).
+void
+MacroAssemblerMIPSCompat::callIon(const Register &callee)
+{
+ MOZ_ASSERT((framePushed() & 3) == 0);
+ if ((framePushed() & 7) == 4) {
+ ma_callIonHalfPush(callee);
+ } else {
+ adjustFrame(sizeof(uint32_t));
+ ma_callIon(callee);
+ }
+}
+
+void
+MacroAssemblerMIPSCompat::reserveStack(uint32_t amount)
+{
+ if (amount)
+ ma_subu(StackPointer, StackPointer, Imm32(amount));
+ adjustFrame(amount);
+}
+
+void
+MacroAssemblerMIPSCompat::freeStack(uint32_t amount)
+{
+ MOZ_ASSERT(amount <= framePushed_);
+ if (amount)
+ ma_addu(StackPointer, StackPointer, Imm32(amount));
+ adjustFrame(-amount);
+}
+
+// Register form: note this does not call adjustFrame, so framePushed_ is
+// left for the caller to manage.
+void
+MacroAssemblerMIPSCompat::freeStack(Register amount)
+{
+ as_addu(StackPointer, StackPointer, amount);
+}
+
+// Spill every register in 'set' to the stack: GPRs first, then doubles in
+// an extra, alignment-padded area addressed through SecondScratchReg.
+void
+MacroAssembler::PushRegsInMask(RegisterSet set)
+{
+ int32_t diffF = set.fpus().size() * sizeof(double);
+ int32_t diffG = set.gprs().size() * sizeof(intptr_t);
+
+ reserveStack(diffG);
+ for (GeneralRegisterBackwardIterator iter(set.gprs()); iter.more(); iter++) {
+ diffG -= sizeof(intptr_t);
+ storePtr(*iter, Address(StackPointer, diffG));
+ }
+ MOZ_ASSERT(diffG == 0);
+
+ // Double values have to be aligned. We reserve extra space so that we can
+ // start writing from the first aligned location.
+ // We reserve a whole extra double so that the buffer has even size.
+ ma_and(SecondScratchReg, sp, Imm32(~(StackAlignment - 1)));
+ reserveStack(diffF + sizeof(double));
+
+ for (FloatRegisterForwardIterator iter(set.fpus()); iter.more(); iter++) {
+ // Use assembly s.d because we have aligned the stack.
+ // :TODO: (Bug 972836) Fix this once odd regs can be used as
+ // float32 only. For now we skip saving odd regs for O32 ABI.
+
+ // :TODO: (Bug 985881) Make a switch for N32 ABI.
+ if ((*iter).code() % 2 == 0)
+ as_sd(*iter, SecondScratchReg, -diffF);
+ diffF -= sizeof(double);
+ }
+ MOZ_ASSERT(diffF == 0);
+}
+
+// Reload registers saved by PushRegsInMask, skipping those in 'ignore'.
+void
+MacroAssembler::PopRegsInMaskIgnore(RegisterSet set, RegisterSet ignore)
+{
+ int32_t diffG = set.gprs().size() * sizeof(intptr_t);
+ int32_t diffF = set.fpus().size() * sizeof(double);
+ const int32_t reservedG = diffG;
+ const int32_t reservedF = diffF;
+
+ // Read the buffer from the first aligned location.
+ ma_addu(SecondScratchReg, sp, Imm32(reservedF + sizeof(double)));
+ ma_and(SecondScratchReg, SecondScratchReg, Imm32(~(StackAlignment - 1)));
+
+ for (FloatRegisterForwardIterator iter(set.fpus()); iter.more(); iter++) {
+ // :TODO: (Bug 972836) Fix this once odd regs can be used as
+ // float32 only. For now we skip loading odd regs for O32 ABI.
+
+ // :TODO: (Bug 985881) Make a switch for N32 ABI.
+ if (!ignore.has(*iter) && ((*iter).code() % 2 == 0))
+ // Use assembly l.d because we have aligned the stack.
+ as_ld(*iter, SecondScratchReg, -diffF);
+ diffF -= sizeof(double);
+ }
+ freeStack(reservedF + sizeof(double));
+ MOZ_ASSERT(diffF == 0);
+
+ for (GeneralRegisterBackwardIterator iter(set.gprs()); iter.more(); iter++) {
+ diffG -= sizeof(intptr_t);
+ if (!ignore.has(*iter))
+ loadPtr(Address(StackPointer, diffG), *iter);
+ }
+ freeStack(reservedG);
+ MOZ_ASSERT(diffG == 0);
+}
+
+// 32-bit arithmetic/logic/move wrappers. Pointers are 32 bits here, so the
+// *Ptr variants are implemented with the same 32-bit instructions.
+void
+MacroAssemblerMIPSCompat::add32(Register src, Register dest)
+{
+ as_addu(dest, dest, src);
+}
+
+void
+MacroAssemblerMIPSCompat::add32(Imm32 imm, Register dest)
+{
+ ma_addu(dest, dest, imm);
+}
+
+void
+
+MacroAssemblerMIPSCompat::add32(Imm32 imm, const Address &dest)
+{
+ load32(dest, SecondScratchReg);
+ ma_addu(SecondScratchReg, imm);
+ store32(SecondScratchReg, dest);
+}
+
+void
+MacroAssemblerMIPSCompat::sub32(Imm32 imm, Register dest)
+{
+ ma_subu(dest, dest, imm);
+}
+
+void
+MacroAssemblerMIPSCompat::sub32(Register src, Register dest)
+{
+ ma_subu(dest, dest, src);
+}
+
+void
+MacroAssemblerMIPSCompat::addPtr(Register src, Register dest)
+{
+ ma_addu(dest, src);
+}
+
+void
+MacroAssemblerMIPSCompat::addPtr(const Address &src, Register dest)
+{
+ loadPtr(src, ScratchRegister);
+ ma_addu(dest, ScratchRegister);
+}
+
+void
+MacroAssemblerMIPSCompat::subPtr(Register src, Register dest)
+{
+ ma_subu(dest, dest, src);
+}
+
+void
+MacroAssemblerMIPSCompat::not32(Register reg)
+{
+ ma_not(reg, reg);
+}
+
+// Logical operations
+void
+MacroAssemblerMIPSCompat::and32(Imm32 imm, Register dest)
+{
+ ma_and(dest, imm);
+}
+
+void
+MacroAssemblerMIPSCompat::and32(Imm32 imm, const Address &dest)
+{
+ load32(dest, SecondScratchReg);
+ ma_and(SecondScratchReg, imm);
+ store32(SecondScratchReg, dest);
+}
+
+void
+MacroAssemblerMIPSCompat::or32(Imm32 imm, const Address &dest)
+{
+ load32(dest, SecondScratchReg);
+ ma_or(SecondScratchReg, imm);
+ store32(SecondScratchReg, dest);
+}
+
+void
+MacroAssemblerMIPSCompat::xor32(Imm32 imm, Register dest)
+{
+ ma_xor(dest, imm);
+}
+
+void
+MacroAssemblerMIPSCompat::xorPtr(Imm32 imm, Register dest)
+{
+ ma_xor(dest, imm);
+}
+
+void
+MacroAssemblerMIPSCompat::xorPtr(Register src, Register dest)
+{
+ ma_xor(dest, src);
+}
+
+void
+MacroAssemblerMIPSCompat::orPtr(Imm32 imm, Register dest)
+{
+ ma_or(dest, imm);
+}
+
+void
+MacroAssemblerMIPSCompat::orPtr(Register src, Register dest)
+{
+ ma_or(dest, src);
+}
+
+void
+MacroAssemblerMIPSCompat::andPtr(Imm32 imm, Register dest)
+{
+ ma_and(dest, imm);
+}
+
+void
+MacroAssemblerMIPSCompat::andPtr(Register src, Register dest)
+{
+ ma_and(dest, src);
+}
+
+void
+MacroAssemblerMIPSCompat::move32(const Imm32 &imm, const Register &dest)
+{
+ ma_li(dest, imm);
+}
+
+void
+MacroAssemblerMIPSCompat::move32(const Register &src, const Register &dest)
+{
+ ma_move(dest, src);
+}
+
+void
+MacroAssemblerMIPSCompat::movePtr(const Register &src, const Register &dest)
+{
+ ma_move(dest, src);
+}
+void
+MacroAssemblerMIPSCompat::movePtr(const ImmWord &imm, const Register &dest)
+{
+ ma_li(dest, Imm32(imm.value));
+}
+
+void
+MacroAssemblerMIPSCompat::movePtr(const ImmGCPtr &imm, const Register &dest)
+{
+ ma_li(dest, imm);
+}
+void
+MacroAssemblerMIPSCompat::movePtr(const ImmPtr &imm, const Register &dest)
+{
+ movePtr(ImmWord(uintptr_t(imm.value)), dest);
+}
+void
+MacroAssemblerMIPSCompat::movePtr(const AsmJSImmPtr &imm, const Register &dest)
+{
+ MOZ_ASSUME_UNREACHABLE("NYI");
+}
+
+// Load wrappers of various widths/extensions, forwarding to ma_load and the
+// FP load helpers.
+void
+MacroAssemblerMIPSCompat::load8ZeroExtend(const Address &address, const Register &dest)
+{
+ ma_load(dest, address, SizeByte, ZeroExtend);
+}
+
+void
+MacroAssemblerMIPSCompat::load8ZeroExtend(const BaseIndex &src, const Register &dest)
+{
+ ma_load(dest, src, SizeByte, ZeroExtend);
+}
+
+void
+MacroAssemblerMIPSCompat::load8SignExtend(const Address &address, const Register &dest)
+{
+ ma_load(dest, address, SizeByte, SignExtend);
+}
+
+void
+MacroAssemblerMIPSCompat::load8SignExtend(const BaseIndex &src, const Register &dest)
+{
+ ma_load(dest, src, SizeByte, SignExtend);
+}
+
+void
+MacroAssemblerMIPSCompat::load16ZeroExtend(const Address &address, const Register &dest)
+{
+ ma_load(dest, address, SizeHalfWord, ZeroExtend);
+}
+
+void
+MacroAssemblerMIPSCompat::load16ZeroExtend(const BaseIndex &src, const Register &dest)
+{
+ ma_load(dest, src, SizeHalfWord, ZeroExtend);
+}
+
+void
+MacroAssemblerMIPSCompat::load16SignExtend(const Address &address, const Register &dest)
+{
+ ma_load(dest, address, SizeHalfWord, SignExtend);
+}
+
+void
+MacroAssemblerMIPSCompat::load16SignExtend(const BaseIndex &src, const Register &dest)
+{
+ ma_load(dest, src, SizeHalfWord, SignExtend);
+}
+
+void
+MacroAssemblerMIPSCompat::load32(const Address &address, const Register &dest)
+{
+ ma_lw(dest, address);
+}
+
+void
+MacroAssemblerMIPSCompat::load32(const BaseIndex &address, const Register &dest)
+{
+ ma_load(dest, address, SizeWord);
+}
+
+// Absolute-address form: materialize the address in ScratchRegister first.
+void
+MacroAssemblerMIPSCompat::load32(const AbsoluteAddress &address, const Register &dest)
+{
+ ma_li(ScratchRegister, Imm32((uint32_t)address.addr));
+ as_lw(dest, ScratchRegister, 0);
+}
+
+// On this 32-bit target a pointer load is just a word load.
+void
+MacroAssemblerMIPSCompat::loadPtr(const Address &address, const Register &dest)
+{
+ ma_lw(dest, address);
+}
+
+void
+MacroAssemblerMIPSCompat::loadPtr(const BaseIndex &src, const Register &dest)
+{
+ load32(src, dest);
+}
+
+void
+MacroAssemblerMIPSCompat::loadPtr(const AbsoluteAddress &address, const Register &dest)
+{
+ ma_li(ScratchRegister, Imm32((uint32_t)address.addr));
+ as_lw(dest, ScratchRegister, 0);
+}
+void
+MacroAssemblerMIPSCompat::loadPtr(const AsmJSAbsoluteAddress &address, const Register &dest)
+{
+ movePtr(AsmJSImmPtr(address.kind()), ScratchRegister);
+ loadPtr(Address(ScratchRegister, 0x0), dest);
+}
+
+// Load the payload word of a boxed Value as a private pointer.
+void
+MacroAssemblerMIPSCompat::loadPrivate(const Address &address, const Register &dest)
+{
+ ma_lw(dest, Address(address.base, address.offset + PAYLOAD_OFFSET));
+}
+
+void
+MacroAssemblerMIPSCompat::loadDouble(const Address &address, const FloatRegister &dest)
+{
+ ma_ld(dest, address);
+}
+
+void
+MacroAssemblerMIPSCompat::loadDouble(const BaseIndex &src, const FloatRegister &dest)
+{
+ computeScaledAddress(src, SecondScratchReg);
+ ma_ld(dest, Address(SecondScratchReg, src.offset));
+}
+
+// Load a float32 and widen it to double in place.
+void
+MacroAssemblerMIPSCompat::loadFloatAsDouble(const Address &address, const FloatRegister &dest)
+{
+ ma_ls(dest, address);
+ as_cvtds(dest, dest);
+}
+
+void
+MacroAssemblerMIPSCompat::loadFloatAsDouble(const BaseIndex &src, const FloatRegister &dest)
+{
+ loadFloat32(src, dest);
+ as_cvtds(dest, dest);
+}
+
+void
+MacroAssemblerMIPSCompat::loadFloat32(const Address &address, const FloatRegister &dest)
+{
+ ma_ls(dest, address);
+}
+
+void
+MacroAssemblerMIPSCompat::loadFloat32(const BaseIndex &src, const FloatRegister &dest)
+{
+ computeScaledAddress(src, SecondScratchReg);
+ ma_ls(dest, Address(SecondScratchReg, src.offset));
+}
+
+// Store wrappers of various widths, forwarding to ma_store; immediates are
+// materialized in a scratch register first.
+void
+MacroAssemblerMIPSCompat::store8(const Imm32 &imm, const Address &address)
+{
+ ma_li(SecondScratchReg, imm);
+ ma_store(SecondScratchReg, address, SizeByte);
+}
+
+void
+MacroAssemblerMIPSCompat::store8(const Register &src, const Address &address)
+{
+ ma_store(src, address, SizeByte);
+}
+
+void
+MacroAssemblerMIPSCompat::store8(const Imm32 &imm, const BaseIndex &dest)
+{
+ ma_store(imm, dest, SizeByte);
+}
+
+void
+MacroAssemblerMIPSCompat::store8(const Register &src, const BaseIndex &dest)
+{
+ ma_store(src, dest, SizeByte);
+}
+
+void
+MacroAssemblerMIPSCompat::store16(const Imm32 &imm, const Address &address)
+{
+ ma_li(SecondScratchReg, imm);
+ ma_store(SecondScratchReg, address, SizeHalfWord);
+}
+
+void
+MacroAssemblerMIPSCompat::store16(const Register &src, const Address &address)
+{
+ ma_store(src, address, SizeHalfWord);
+}
+
+void
+MacroAssemblerMIPSCompat::store16(const Imm32 &imm, const BaseIndex &dest)
+{
+ ma_store(imm, dest, SizeHalfWord);
+}
+
+void
+MacroAssemblerMIPSCompat::store16(const Register &src, const BaseIndex &address)
+{
+ ma_store(src, address, SizeHalfWord);
+}
+
+// Word stores coincide with pointer stores on this 32-bit target.
+void
+MacroAssemblerMIPSCompat::store32(const Register &src, const AbsoluteAddress &address)
+{
+ storePtr(src, address);
+}
+
+void
+MacroAssemblerMIPSCompat::store32(const Register &src, const Address &address)
+{
+ storePtr(src, address);
+}
+
+void
+MacroAssemblerMIPSCompat::store32(const Imm32 &src, const Address &address)
+{
+ move32(src, ScratchRegister);
+ storePtr(ScratchRegister, address);
+}
+
+void
+MacroAssemblerMIPSCompat::store32(const Imm32 &imm, const BaseIndex &dest)
+{
+ ma_store(imm, dest, SizeWord);
+}
+
+void
+MacroAssemblerMIPSCompat::store32(const Register &src, const BaseIndex &dest)
+{
+ ma_store(src, dest, SizeWord);
+}
+
+void
+MacroAssemblerMIPSCompat::storePtr(ImmWord imm, const Address &address)
+{
+ ma_li(ScratchRegister, Imm32(imm.value));
+ ma_sw(ScratchRegister, address);
+}
+
+void
+MacroAssemblerMIPSCompat::storePtr(ImmPtr imm, const Address &address)
+{
+ storePtr(ImmWord(uintptr_t(imm.value)), address);
+}
+
+void
+MacroAssemblerMIPSCompat::storePtr(ImmGCPtr imm, const Address &address)
+{
+ ma_li(ScratchRegister, imm);
+ ma_sw(ScratchRegister, address);
+}
+
+void
+MacroAssemblerMIPSCompat::storePtr(Register src, const Address &address)
+{
+ ma_sw(src, address);
+}
+
+void
+MacroAssemblerMIPSCompat::storePtr(const Register &src, const AbsoluteAddress &dest)
+{
+ ma_li(ScratchRegister, Imm32((uint32_t)dest.addr));
+ as_sw(src, ScratchRegister, 0);
+}
+
+void
+MacroAssemblerMIPSCompat::subPtr(Imm32 imm, const Register dest)
+{
+ ma_subu(dest, dest, imm);
+}
+
+void
+MacroAssemblerMIPSCompat::addPtr(Imm32 imm, const Register dest)
+{
+ ma_addu(dest, imm);
+}
+
+// Read-modify-write add of an immediate to a pointer in memory.
+void
+MacroAssemblerMIPSCompat::addPtr(Imm32 imm, const Address &dest)
+{
+ loadPtr(dest, ScratchRegister);
+ addPtr(imm, ScratchRegister);
+ storePtr(ScratchRegister, dest);
+}
+
+void
+MacroAssemblerMIPSCompat::branchDouble(DoubleCondition cond, const FloatRegister &lhs,
+ const FloatRegister &rhs, Label *label)
+{
+ ma_bc1d(lhs, rhs, label, cond);
+}
+
+void
+MacroAssemblerMIPSCompat::branchFloat(DoubleCondition cond, const FloatRegister &lhs,
+ const FloatRegister &rhs, Label *label)
+{
+ ma_bc1s(lhs, rhs, label, cond);
+}
+
+// higher level tag testing code
+Operand
+ToPayload(Operand base)
+{
+ return Operand(Register::FromCode(base.base()), base.disp() + PAYLOAD_OFFSET);
+}
+
+Operand
+ToType(Operand base)
+{
+ return Operand(Register::FromCode(base.base()), base.disp() + TAG_OFFSET);
+}
+
+void
+MacroAssemblerMIPSCompat::branchTestGCThing(Condition cond, const Address &address, Label *label)
+{
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ extractTag(address, SecondScratchReg);
+ ma_b(SecondScratchReg, ImmTag(JSVAL_LOWER_INCL_TAG_OF_GCTHING_SET), label,
+ (cond == Equal) ? AboveOrEqual : Below);
+}
+void
+MacroAssemblerMIPSCompat::branchTestGCThing(Condition cond, const BaseIndex &src, Label *label)
+{
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ extractTag(src, SecondScratchReg);
+ ma_b(SecondScratchReg, ImmTag(JSVAL_LOWER_INCL_TAG_OF_GCTHING_SET), label,
+ (cond == Equal) ? AboveOrEqual : Below);
+}
+
+void
+MacroAssemblerMIPSCompat::branchTestPrimitive(Condition cond, const ValueOperand &value,
+ Label *label)
+{
+ branchTestPrimitive(cond, value.typeReg(), label);
+}
+void
+MacroAssemblerMIPSCompat::branchTestPrimitive(Condition cond, const Register &tag, Label *label)
+{
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ ma_b(tag, ImmTag(JSVAL_UPPER_EXCL_TAG_OF_PRIMITIVE_SET), label,
+ (cond == Equal) ? Below : AboveOrEqual);
+}
+
+void
+MacroAssemblerMIPSCompat::branchTestInt32(Condition cond, const ValueOperand &value, Label *label)
+{
+ MOZ_ASSERT(cond == Assembler::Equal || cond == Assembler::NotEqual);
+ ma_b(value.typeReg(), ImmType(JSVAL_TYPE_INT32), label, cond);
+}
+
+void
+MacroAssemblerMIPSCompat::branchTestInt32(Condition cond, const Register &tag, Label *label)
+{
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ ma_b(tag, ImmTag(JSVAL_TAG_INT32), label, cond);
+}
+
+void
+MacroAssemblerMIPSCompat::branchTestInt32(Condition cond, const Address &address, Label *label)
+{
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ extractTag(address, SecondScratchReg);
+ ma_b(SecondScratchReg, ImmTag(JSVAL_TAG_INT32), label, cond);
+}
+
+void
+MacroAssemblerMIPSCompat::branchTestInt32(Condition cond, const BaseIndex &src, Label *label)
+{
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ extractTag(src, SecondScratchReg);
+ ma_b(SecondScratchReg, ImmTag(JSVAL_TAG_INT32), label, cond);
+}
+
+void
+MacroAssemblerMIPSCompat:: branchTestBoolean(Condition cond, const ValueOperand &value,
+ Label *label)
+{
+ MOZ_ASSERT(cond == Assembler::Equal || cond == Assembler::NotEqual);
+ ma_b(value.typeReg(), ImmType(JSVAL_TYPE_BOOLEAN), label, cond);
+}
+
+void
+MacroAssemblerMIPSCompat:: branchTestBoolean(Condition cond, const Register &tag, Label *label)
+{
+ MOZ_ASSERT(cond == Assembler::Equal || cond == Assembler::NotEqual);
+ ma_b(tag, ImmType(JSVAL_TYPE_BOOLEAN), label, cond);
+}
+
+void
+MacroAssemblerMIPSCompat::branchTestBoolean(Condition cond, const BaseIndex &src, Label *label)
+{
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ extractTag(src, SecondScratchReg);
+ ma_b(SecondScratchReg, ImmType(JSVAL_TYPE_BOOLEAN), label, cond);
+}
+
+void
+MacroAssemblerMIPSCompat::branchTestDouble(Condition cond, const ValueOperand &value, Label *label)
+{
+    MOZ_ASSERT(cond == Assembler::Equal || cond == Assembler::NotEqual);
+    // On nunbox32 a value is a double iff its tag is strictly below
+    // JSVAL_TAG_CLEAR, so Equal/NotEqual is translated into a range test.
+    Assembler::Condition actual = (cond == Equal) ? Below : AboveOrEqual;
+    ma_b(value.typeReg(), ImmTag(JSVAL_TAG_CLEAR), label, actual);
+}
+
+void
+MacroAssemblerMIPSCompat::branchTestDouble(Condition cond, const Register &tag, Label *label)
+{
+    MOZ_ASSERT(cond == Assembler::Equal || cond == NotEqual);
+    // Doubles occupy every tag value strictly below JSVAL_TAG_CLEAR, so the
+    // equality request becomes a Below / AboveOrEqual comparison.
+    Condition actual = (cond == Equal) ? Below : AboveOrEqual;
+    ma_b(tag, ImmTag(JSVAL_TAG_CLEAR), label, actual);
+}
+
+void
+MacroAssemblerMIPSCompat::branchTestDouble(Condition cond, const Address &address, Label *label)
+{
+    MOZ_ASSERT(cond == Equal || cond == NotEqual);
+    // A value is a double iff its tag is strictly below JSVAL_TAG_CLEAR, so
+    // Equal/NotEqual must be translated into a range comparison, exactly as
+    // the Register and BaseIndex overloads do. Passing |cond| through
+    // unchanged (the previous code) wrongly tested tag == JSVAL_TAG_CLEAR.
+    Condition actual = (cond == Equal) ? Below : AboveOrEqual;
+    extractTag(address, SecondScratchReg);
+    ma_b(SecondScratchReg, ImmTag(JSVAL_TAG_CLEAR), label, actual);
+}
+
+void
+MacroAssemblerMIPSCompat::branchTestDouble(Condition cond, const BaseIndex &src, Label *label)
+{
+    MOZ_ASSERT(cond == Equal || cond == NotEqual);
+    // Same range comparison as the Register overload, with the tag fetched
+    // from a base+index address into SecondScratchReg first.
+    Condition actual = (cond == Equal) ? Below : AboveOrEqual;
+    extractTag(src, SecondScratchReg);
+    ma_b(SecondScratchReg, ImmTag(JSVAL_TAG_CLEAR), label, actual);
+}
+
+void
+MacroAssemblerMIPSCompat::branchTestNull(Condition cond, const ValueOperand &value, Label *label)
+{
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ ma_b(value.typeReg(), ImmType(JSVAL_TYPE_NULL), label, cond);
+}
+
+void
+MacroAssemblerMIPSCompat::branchTestNull(Condition cond, const Register &tag, Label *label)
+{
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ ma_b(tag, ImmTag(JSVAL_TAG_NULL), label, cond);
+}
+
+void
+MacroAssemblerMIPSCompat::branchTestNull(Condition cond, const BaseIndex &src, Label *label)
+{
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ extractTag(src, SecondScratchReg);
+ ma_b(SecondScratchReg, ImmTag(JSVAL_TAG_NULL), label, cond);
+}
+
+
+void
+MacroAssemblerMIPSCompat::branchTestObject(Condition cond, const ValueOperand &value, Label *label)
+{
+ branchTestObject(cond, value.typeReg(), label);
+}
+
+void
+MacroAssemblerMIPSCompat::branchTestObject(Condition cond, const Register &tag, Label *label)
+{
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ ma_b(tag, ImmTag(JSVAL_TAG_OBJECT), label, cond);
+}
+
+void
+MacroAssemblerMIPSCompat::branchTestObject(Condition cond, const BaseIndex &src, Label *label)
+{
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ extractTag(src, SecondScratchReg);
+ ma_b(SecondScratchReg, ImmTag(JSVAL_TAG_OBJECT), label, cond);
+}
+
+
+void
+MacroAssemblerMIPSCompat::branchTestString(Condition cond, const ValueOperand &value, Label *label)
+{
+ branchTestString(cond, value.typeReg(), label);
+}
+
+void
+MacroAssemblerMIPSCompat::branchTestString(Condition cond, const Register &tag, Label *label)
+{
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ ma_b(tag, ImmTag(JSVAL_TAG_STRING), label, cond);
+}
+
+void
+MacroAssemblerMIPSCompat::branchTestString(Condition cond, const BaseIndex &src, Label *label)
+{
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ extractTag(src, SecondScratchReg);
+ ma_b(SecondScratchReg, ImmTag(JSVAL_TAG_STRING), label, cond);
+}
+
+void
+MacroAssemblerMIPSCompat::branchTestUndefined(Condition cond, const ValueOperand &value,
+ Label *label)
+{
+ MOZ_ASSERT(cond == Assembler::Equal || cond == Assembler::NotEqual);
+ ma_b(value.typeReg(), ImmType(JSVAL_TYPE_UNDEFINED), label, cond);
+}
+
+void
+MacroAssemblerMIPSCompat::branchTestUndefined(Condition cond, const Register &tag, Label *label)
+{
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ ma_b(tag, ImmTag(JSVAL_TAG_UNDEFINED), label, cond);
+}
+
+void
+MacroAssemblerMIPSCompat::branchTestUndefined(Condition cond, const BaseIndex &src, Label *label)
+{
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ extractTag(src, SecondScratchReg);
+ ma_b(SecondScratchReg, ImmTag(JSVAL_TAG_UNDEFINED), label, cond);
+}
+
+void
+MacroAssemblerMIPSCompat::branchTestUndefined(Condition cond, const Address &address, Label *label)
+{
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ extractTag(address, SecondScratchReg);
+ ma_b(SecondScratchReg, ImmTag(JSVAL_TAG_UNDEFINED), label, cond);
+}
+
+
+void
+MacroAssemblerMIPSCompat::branchTestNumber(Condition cond, const ValueOperand &value, Label *label)
+{
+ branchTestNumber(cond, value.typeReg(), label);
+}
+
+void
+MacroAssemblerMIPSCompat::branchTestNumber(Condition cond, const Register &tag, Label *label)
+{
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ ma_b(tag, ImmTag(JSVAL_UPPER_INCL_TAG_OF_NUMBER_SET), label,
+ cond == Equal ? BelowOrEqual : Above);
+}
+
+void
+MacroAssemblerMIPSCompat::branchTestMagic(Condition cond, const ValueOperand &value, Label *label)
+{
+ branchTestMagic(cond, value.typeReg(), label);
+}
+
+void
+MacroAssemblerMIPSCompat::branchTestMagic(Condition cond, const Register &tag, Label *label)
+{
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ ma_b(tag, ImmTag(JSVAL_TAG_MAGIC), label, cond);
+}
+
+void
+MacroAssemblerMIPSCompat::branchTestMagic(Condition cond, const Address &address, Label *label)
+{
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ extractTag(address, SecondScratchReg);
+ ma_b(SecondScratchReg, ImmTag(JSVAL_TAG_MAGIC), label, cond);
+}
+
+void
+MacroAssemblerMIPSCompat::branchTestMagic(Condition cond, const BaseIndex &src, Label *label)
+{
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ extractTag(src, SecondScratchReg);
+ ma_b(SecondScratchReg, ImmTag(JSVAL_TAG_MAGIC), label, cond);
+}
+
+void
+MacroAssemblerMIPSCompat::branchTestValue(Condition cond, const ValueOperand &value,
+                                          const Value &v, Label *label)
+{
+    // Compare the boxed Value in |value| against the constant |v|.
+    // Materialize |v|'s payload word (a GC pointer or raw 32-bit payload).
+    moveData(v, ScratchRegister);
+
+    if (cond == Equal) {
+        // Branch to |label| only when both payload and tag match; if the
+        // payloads already differ, skip the tag check entirely.
+        Label done;
+        ma_b(value.payloadReg(), ScratchRegister, &done, NotEqual, ShortJump);
+        {
+            ma_b(value.typeReg(), Imm32(getType(v)), label, Equal);
+        }
+        bind(&done);
+    } else {
+        MOZ_ASSERT(cond == NotEqual);
+        // Branch to |label| as soon as either half differs.
+        ma_b(value.payloadReg(), ScratchRegister, label, NotEqual);
+
+        ma_b(value.typeReg(), Imm32(getType(v)), label, NotEqual);
+    }
+}
+
+void
+MacroAssemblerMIPSCompat::branchTestValue(Condition cond, const Address &valaddr,
+                                          const ValueOperand &value, Label *label)
+{
+    MOZ_ASSERT(cond == Equal || cond == NotEqual);
+
+    // Compare a Value in memory against the Value in |value|, branching on
+    // each 32-bit half in turn.
+    // NOTE(review): with cond == Equal this takes |label| as soon as the
+    // tags alone compare equal, before the payloads are compared — confirm
+    // callers only rely on the NotEqual-style semantics.
+
+    // Load tag.
+    ma_lw(ScratchRegister, Address(valaddr.base, valaddr.offset + TAG_OFFSET));
+    branchPtr(cond, ScratchRegister, value.typeReg(), label);
+
+    // Load payload
+    ma_lw(ScratchRegister, Address(valaddr.base, valaddr.offset + PAYLOAD_OFFSET));
+    branchPtr(cond, ScratchRegister, value.payloadReg(), label);
+}
+
+// unboxing code
+void
+MacroAssemblerMIPSCompat::unboxInt32(const ValueOperand &operand, const Register &dest)
+{
+ ma_move(dest, operand.payloadReg());
+}
+
+void
+MacroAssemblerMIPSCompat::unboxInt32(const Address &src, const Register &dest)
+{
+ ma_lw(dest, Address(src.base, src.offset + PAYLOAD_OFFSET));
+}
+
+void
+MacroAssemblerMIPSCompat::unboxBoolean(const ValueOperand &operand, const Register &dest)
+{
+ ma_move(dest, operand.payloadReg());
+}
+
+void
+MacroAssemblerMIPSCompat::unboxBoolean(const Address &src, const Register &dest)
+{
+ ma_lw(dest, Address(src.base, src.offset + PAYLOAD_OFFSET));
+}
+
+void
+MacroAssemblerMIPSCompat::unboxDouble(const ValueOperand &operand, const FloatRegister &dest)
+{
+ MOZ_ASSERT(dest != ScratchFloatReg);
+ moveToDoubleLo(operand.payloadReg(), dest);
+ moveToDoubleHi(operand.typeReg(), dest);
+}
+
+void
+MacroAssemblerMIPSCompat::unboxDouble(const Address &src, const FloatRegister &dest)
+{
+ ma_lw(ScratchRegister, Address(src.base, src.offset + PAYLOAD_OFFSET));
+ moveToDoubleLo(ScratchRegister, dest);
+ ma_lw(ScratchRegister, Address(src.base, src.offset + TAG_OFFSET));
+ moveToDoubleHi(ScratchRegister, dest);
+}
+
+void
+MacroAssemblerMIPSCompat::unboxString(const ValueOperand &operand, const Register &dest)
+{
+ ma_move(dest, operand.payloadReg());
+}
+
+void
+MacroAssemblerMIPSCompat::unboxString(const Address &src, const Register &dest)
+{
+ ma_lw(dest, Address(src.base, src.offset + PAYLOAD_OFFSET));
+}
+
+void
+MacroAssemblerMIPSCompat::unboxObject(const ValueOperand &src, const Register &dest)
+{
+ ma_move(dest, src.payloadReg());
+}
+
+void
+MacroAssemblerMIPSCompat::unboxValue(const ValueOperand &src, AnyRegister dest)
+{
+    // Unbox |src| into |dest|. For a float destination, an int32 payload is
+    // converted to double; any other value is reassembled as a raw double.
+    if (dest.isFloat()) {
+        Label notInt32, end;
+        branchTestInt32(Assembler::NotEqual, src, &notInt32);
+        convertInt32ToDouble(src.payloadReg(), dest.fpu());
+        ma_b(&end, ShortJump);
+        bind(&notInt32);
+        unboxDouble(src, dest.fpu());
+        bind(&end);
+    } else if (src.payloadReg() != dest.gpr()) {
+        // GPR destination: the payload word already is the unboxed value.
+        ma_move(dest.gpr(), src.payloadReg());
+    }
+}
+
+void
+MacroAssemblerMIPSCompat::unboxPrivate(const ValueOperand &src, Register dest)
+{
+ ma_move(dest, src.payloadReg());
+}
+
+void
+MacroAssemblerMIPSCompat::boxDouble(const FloatRegister &src, const ValueOperand &dest)
+{
+ moveFromDoubleLo(src, dest.payloadReg());
+ moveFromDoubleHi(src, dest.typeReg());
+}
+
+void
+MacroAssemblerMIPSCompat::boxNonDouble(JSValueType type, const Register &src,
+ const ValueOperand &dest)
+{
+ if (src != dest.payloadReg())
+ ma_move(dest.payloadReg(), src);
+ ma_li(dest.typeReg(), ImmType(type));
+}
+
+void
+MacroAssemblerMIPSCompat::boolValueToDouble(const ValueOperand &operand, const FloatRegister &dest)
+{
+ convertBoolToInt32(ScratchRegister, operand.payloadReg());
+ convertInt32ToDouble(ScratchRegister, dest);
+}
+
+void
+MacroAssemblerMIPSCompat::int32ValueToDouble(const ValueOperand &operand,
+ const FloatRegister &dest)
+{
+ convertInt32ToDouble(operand.payloadReg(), dest);
+}
+
+void
+MacroAssemblerMIPSCompat::boolValueToFloat32(const ValueOperand &operand,
+ const FloatRegister &dest)
+{
+
+ convertBoolToInt32(ScratchRegister, operand.payloadReg());
+ convertInt32ToFloat32(ScratchRegister, dest);
+}
+
+void
+MacroAssemblerMIPSCompat::int32ValueToFloat32(const ValueOperand &operand,
+ const FloatRegister &dest)
+{
+ convertInt32ToFloat32(operand.payloadReg(), dest);
+}
+
+void
+MacroAssemblerMIPSCompat::loadConstantFloat32(float f, const FloatRegister &dest)
+{
+ ma_lis(dest, f);
+}
+
+void
+MacroAssemblerMIPSCompat::loadInt32OrDouble(const Address &src, const FloatRegister &dest)
+{
+ Label notInt32, end;
+ // If it's an int, convert it to double.
+ ma_lw(SecondScratchReg, Address(src.base, src.offset + TAG_OFFSET));
+ branchTestInt32(Assembler::NotEqual, SecondScratchReg, ¬Int32);
+ ma_lw(SecondScratchReg, Address(src.base, src.offset + PAYLOAD_OFFSET));
+ convertInt32ToDouble(SecondScratchReg, dest);
+ ma_b(&end, ShortJump);
+
+ // Not an int, just load as double.
+ bind(¬Int32);
+ ma_ld(dest, src);
+ bind(&end);
+}
+
+void
+MacroAssemblerMIPSCompat::loadInt32OrDouble(Register base, Register index,
+                                            const FloatRegister &dest, int32_t shift)
+{
+    // Load the Value at base + (index << shift) into |dest| as a double,
+    // converting on the fly when the Value holds an int32.
+    Label notInt32, end;
+
+    // If it's an int, convert it to double.
+
+    computeScaledAddress(BaseIndex(base, index, ShiftToScale(shift)), SecondScratchReg);
+    // Since we only have one scratch, we need to stomp over it with the tag.
+    load32(Address(SecondScratchReg, TAG_OFFSET), SecondScratchReg);
+    branchTestInt32(Assembler::NotEqual, SecondScratchReg, &notInt32);
+
+    // Int32 path: the scratch now holds the tag, so recompute the address
+    // before loading the payload.
+    computeScaledAddress(BaseIndex(base, index, ShiftToScale(shift)), SecondScratchReg);
+    load32(Address(SecondScratchReg, PAYLOAD_OFFSET), SecondScratchReg);
+    convertInt32ToDouble(SecondScratchReg, dest);
+    ma_b(&end, ShortJump);
+
+    // Not an int, just load as double.
+    bind(&notInt32);
+    // First, recompute the offset that had been stored in the scratch register
+    // since the scratch register was overwritten loading in the type.
+    computeScaledAddress(BaseIndex(base, index, ShiftToScale(shift)), SecondScratchReg);
+    loadDouble(Address(SecondScratchReg, 0), dest);
+    bind(&end);
+}
+
+void
+MacroAssemblerMIPSCompat::loadConstantDouble(double dp, const FloatRegister &dest)
+{
+ ma_lid(dest, dp);
+}
+
+void
+MacroAssemblerMIPSCompat::branchTestInt32Truthy(bool b, const ValueOperand &value, Label *label)
+{
+ ma_and(ScratchRegister, value.payloadReg(), value.payloadReg());
+ ma_b(ScratchRegister, ScratchRegister, label, b ? NonZero : Zero);
+}
+
+void
+MacroAssemblerMIPSCompat::branchTestStringTruthy(bool b, const ValueOperand &value, Label *label)
+{
+    // A string is truthy iff its length is non-zero: mask off the flag bits
+    // of the lengthAndFlags word and branch on the remaining length bits.
+    Register string = value.payloadReg();
+    size_t mask = (0xFFFFFFFF << JSString::LENGTH_SHIFT);
+    ma_lw(SecondScratchReg, Address(string, JSString::offsetOfLengthAndFlags()));
+
+    // Use SecondScratchReg because ma_and will clobber ScratchRegister
+    ma_and(ScratchRegister, SecondScratchReg, Imm32(mask));
+    ma_b(ScratchRegister, ScratchRegister, label, b ? NonZero : Zero);
+}
+
+void
+MacroAssemblerMIPSCompat::branchTestDoubleTruthy(bool b, const FloatRegister &value, Label *label)
+{
+ ma_lid(ScratchFloatReg, 0.0);
+ DoubleCondition cond = b ? DoubleNotEqual : DoubleEqualOrUnordered;
+ ma_bc1d(value, ScratchFloatReg, label, cond);
+}
+
+void
+MacroAssemblerMIPSCompat::branchTestBooleanTruthy(bool b, const ValueOperand &operand,
+ Label *label)
+{
+ ma_b(operand.payloadReg(), operand.payloadReg(), label, b ? NonZero : Zero);
+}
+
+Register
+MacroAssemblerMIPSCompat::extractObject(const Address &address, Register scratch)
+{
+ ma_lw(scratch, Address(address.base, address.offset + PAYLOAD_OFFSET));
+ return scratch;
+}
+
+Register
+MacroAssemblerMIPSCompat::extractTag(const Address &address, Register scratch)
+{
+ ma_lw(scratch, Address(address.base, address.offset + TAG_OFFSET));
+ return scratch;
+}
+
+Register
+MacroAssemblerMIPSCompat::extractTag(const BaseIndex &address, Register scratch)
+{
+ computeScaledAddress(address, scratch);
+ return extractTag(Address(scratch, address.offset), scratch);
+}
+
+
+uint32_t
+MacroAssemblerMIPSCompat::getType(const Value &val)
+{
+ jsval_layout jv = JSVAL_TO_IMPL(val);
+ return jv.s.tag;
+}
+
+void
+MacroAssemblerMIPSCompat::moveData(const Value &val, Register data)
+{
+ jsval_layout jv = JSVAL_TO_IMPL(val);
+ if (val.isMarkable())
+ ma_li(data, ImmGCPtr(reinterpret_cast<gc::Cell *>(val.toGCThing())));
+ else
+ ma_li(data, Imm32(jv.s.payload.i32));
+}
+
+void
+MacroAssemblerMIPSCompat::moveValue(const Value &val, Register type, Register data)
+{
+ MOZ_ASSERT(type != data);
+ ma_li(type, Imm32(getType(val)));
+ moveData(val, data);
+}
+void
+MacroAssemblerMIPSCompat::moveValue(const Value &val, const ValueOperand &dest)
+{
+ moveValue(val, dest.typeReg(), dest.payloadReg());
+}
+
+CodeOffsetJump
+MacroAssemblerMIPSCompat::jumpWithPatch(RepatchLabel *label)
+{
+    // Emit a patchable absolute jump (patchable li + jr + delay-slot nop)
+    // and register it as a long jump so the target can be rewritten once
+    // the label is bound.
+    // Only one branch per label.
+    MOZ_ASSERT(!label->used());
+    uint32_t dest = label->bound() ? label->offset() : LabelBase::INVALID_OFFSET;
+
+    BufferOffset bo = nextOffset();
+    label->use(bo.getOffset());
+    addLongJump(bo);
+    ma_liPatchable(ScratchRegister, Imm32(dest));
+    as_jr(ScratchRegister);
+    as_nop();
+    return CodeOffsetJump(bo.getOffset());
+}
+
+
+/////////////////////////////////////////////////////////////////
+// X86/X64-common/ARM/MIPS interface.
+/////////////////////////////////////////////////////////////////
+void
+MacroAssemblerMIPSCompat::storeValue(ValueOperand val, Operand dst)
+{
+ storeValue(val, Address(Register::FromCode(dst.base()), dst.disp()));
+}
+
+void
+MacroAssemblerMIPSCompat::storeValue(ValueOperand val, const BaseIndex &dest)
+{
+ computeScaledAddress(dest, SecondScratchReg);
+ storeValue(val, Address(SecondScratchReg, dest.offset));
+}
+
+void
+MacroAssemblerMIPSCompat::storeValue(JSValueType type, Register reg, BaseIndex dest)
+{
+    // Store payload register |reg| plus an immediate type tag at a
+    // base+index address. The scaled address lands in ScratchRegister.
+    computeScaledAddress(dest, ScratchRegister);
+
+    // Make sure that ma_sw doesn't clobber ScratchRegister
+    int32_t offset = dest.offset;
+    if (!Imm16::isInSignedRange(offset)) {
+        // Fold an out-of-range displacement into the base so the final
+        // stores can use offset 0.
+        ma_li(SecondScratchReg, Imm32(offset));
+        as_addu(ScratchRegister, ScratchRegister, SecondScratchReg);
+        offset = 0;
+    }
+
+    storeValue(type, reg, Address(ScratchRegister, offset));
+}
+
+void
+MacroAssemblerMIPSCompat::storeValue(ValueOperand val, const Address &dest)
+{
+ ma_sw(val.payloadReg(), Address(dest.base, dest.offset + PAYLOAD_OFFSET));
+ ma_sw(val.typeReg(), Address(dest.base, dest.offset + TAG_OFFSET));
+}
+
+void
+MacroAssemblerMIPSCompat::storeValue(JSValueType type, Register reg, Address dest)
+{
+ MOZ_ASSERT(dest.base != SecondScratchReg);
+
+ ma_sw(reg, Address(dest.base, dest.offset + PAYLOAD_OFFSET));
+ ma_li(SecondScratchReg, ImmTag(JSVAL_TYPE_TO_TAG(type)));
+ ma_sw(SecondScratchReg, Address(dest.base, dest.offset + TAG_OFFSET));
+}
+
+void
+MacroAssemblerMIPSCompat::storeValue(const Value &val, Address dest)
+{
+    MOZ_ASSERT(dest.base != SecondScratchReg);
+
+    // Store the constant |val|: tag word first, then the payload, each
+    // materialized through SecondScratchReg.
+    ma_li(SecondScratchReg, Imm32(getType(val)));
+    ma_sw(SecondScratchReg, Address(dest.base, dest.offset + TAG_OFFSET));
+    moveData(val, SecondScratchReg);
+    ma_sw(SecondScratchReg, Address(dest.base, dest.offset + PAYLOAD_OFFSET));
+}
+
+void
+MacroAssemblerMIPSCompat::storeValue(const Value &val, BaseIndex dest)
+{
+ computeScaledAddress(dest, ScratchRegister);
+
+ // Make sure that ma_sw doesn't clobber ScratchRegister
+ int32_t offset = dest.offset;
+ if (!Imm16::isInSignedRange(offset)) {
+ ma_li(SecondScratchReg, Imm32(offset));
+ as_addu(ScratchRegister, ScratchRegister, SecondScratchReg);
+ offset = 0;
+ }
+ storeValue(val, Address(ScratchRegister, offset));
+}
+
+void
+MacroAssemblerMIPSCompat::loadValue(const BaseIndex &addr, ValueOperand val)
+{
+ computeScaledAddress(addr, SecondScratchReg);
+ loadValue(Address(SecondScratchReg, addr.offset), val);
+}
+
+void
+MacroAssemblerMIPSCompat::loadValue(Address src, ValueOperand val)
+{
+    // Load a boxed Value (payload + tag words) from memory into |val|.
+    // Ensure that loading the payload does not erase the pointer to the
+    // Value in memory.
+    if (src.base != val.payloadReg()) {
+        ma_lw(val.payloadReg(), Address(src.base, src.offset + PAYLOAD_OFFSET));
+        ma_lw(val.typeReg(), Address(src.base, src.offset + TAG_OFFSET));
+    } else {
+        // The base register doubles as the payload destination: read the
+        // tag first so the base survives until the final load.
+        ma_lw(val.typeReg(), Address(src.base, src.offset + TAG_OFFSET));
+        ma_lw(val.payloadReg(), Address(src.base, src.offset + PAYLOAD_OFFSET));
+    }
+}
+
+void
+MacroAssemblerMIPSCompat::tagValue(JSValueType type, Register payload, ValueOperand dest)
+{
+ MOZ_ASSERT(payload != dest.typeReg());
+ ma_li(dest.typeReg(), ImmType(type));
+ if (payload != dest.payloadReg())
+ ma_move(dest.payloadReg(), payload);
+}
+
+void
+MacroAssemblerMIPSCompat::pushValue(ValueOperand val)
+{
+ // Allocate stack slots for type and payload. One for each.
+ ma_subu(StackPointer, StackPointer, Imm32(sizeof(Value)));
+ // Store type and payload.
+ storeValue(val, Address(StackPointer, 0));
+}
+
+void
+MacroAssemblerMIPSCompat::pushValue(const Address &addr)
+{
+    // Push a Value stored in memory, copying each 32-bit half through
+    // ScratchRegister.
+    // NOTE(review): sp is decremented before |addr| is read, so an
+    // sp-relative |addr| would be off by sizeof(Value) — confirm callers
+    // never pass an sp-relative address here.
+    // Allocate stack slots for type and payload. One for each.
+    ma_subu(StackPointer, StackPointer, Imm32(sizeof(Value)));
+    // Store type and payload.
+    ma_lw(ScratchRegister, Address(addr.base, addr.offset + TAG_OFFSET));
+    ma_sw(ScratchRegister, Address(StackPointer, TAG_OFFSET));
+    ma_lw(ScratchRegister, Address(addr.base, addr.offset + PAYLOAD_OFFSET));
+    ma_sw(ScratchRegister, Address(StackPointer, PAYLOAD_OFFSET));
+}
+
+void
+MacroAssemblerMIPSCompat::popValue(ValueOperand val)
+{
+    // Pop a boxed Value from the stack into |val| and release its slots.
+    // Load payload and type.
+    as_lw(val.payloadReg(), StackPointer, PAYLOAD_OFFSET);
+    as_lw(val.typeReg(), StackPointer, TAG_OFFSET);
+    // Free stack.
+    as_addiu(StackPointer, StackPointer, sizeof(Value));
+}
+
+void
+MacroAssemblerMIPSCompat::storePayload(const Value &val, Address dest)
+{
+ moveData(val, SecondScratchReg);
+ ma_sw(SecondScratchReg, Address(dest.base, dest.offset + PAYLOAD_OFFSET));
+}
+
+void
+MacroAssemblerMIPSCompat::storePayload(Register src, Address dest)
+{
+    // Store only the payload word of the Value slot; the tag word is left
+    // untouched. (Removed a redundant trailing |return;| from the void body.)
+    ma_sw(src, Address(dest.base, dest.offset + PAYLOAD_OFFSET));
+}
+
+void
+MacroAssemblerMIPSCompat::storePayload(const Value &val, Register base, Register index,
+ int32_t shift)
+{
+ computeScaledAddress(BaseIndex(base, index, ShiftToScale(shift)), SecondScratchReg);
+
+ moveData(val, ScratchRegister);
+
+ as_sw(ScratchRegister, SecondScratchReg, NUNBOX32_PAYLOAD_OFFSET);
+}
+
+void
+MacroAssemblerMIPSCompat::storePayload(Register src, Register base, Register index, int32_t shift)
+{
+ computeScaledAddress(BaseIndex(base, index, ShiftToScale(shift)), SecondScratchReg);
+ as_sw(src, SecondScratchReg, NUNBOX32_PAYLOAD_OFFSET);
+}
+
+void
+MacroAssemblerMIPSCompat::storeTypeTag(ImmTag tag, Address dest)
+{
+ ma_li(SecondScratchReg, tag);
+ ma_sw(SecondScratchReg, Address(dest.base, dest.offset + TAG_OFFSET));
+}
+
+void
+MacroAssemblerMIPSCompat::storeTypeTag(ImmTag tag, Register base, Register index, int32_t shift)
+{
+ computeScaledAddress(BaseIndex(base, index, ShiftToScale(shift)), SecondScratchReg);
+ ma_li(ScratchRegister, tag);
+ as_sw(ScratchRegister, SecondScratchReg, TAG_OFFSET);
+}
+
+void
+MacroAssemblerMIPSCompat::linkExitFrame()
+{
+    // Publish the current stack pointer as the runtime's ionTop, linking
+    // the exit frame for stack walking.
+    uint8_t *dest = (uint8_t*)GetIonContext()->runtime->addressOfIonTop();
+    movePtr(ImmPtr(dest), ScratchRegister);
+    ma_sw(StackPointer, Address(ScratchRegister, 0));
+}
+
+void
+MacroAssemblerMIPSCompat::linkParallelExitFrame(const Register &pt)
+{
+ ma_sw(StackPointer, Address(pt, offsetof(PerThreadData, ionTop)));
+}
+
+// This macroinstruction calls the Ion code and pushes the return address to
+// the stack in the case when the stack is aligned.
+void
+MacroAssemblerMIPS::ma_callIon(const Register r)
+{
+    // Reserve two words (keeping the stack aligned) and store the return
+    // address from the jalr delay slot — the sw executes before the callee's
+    // first instruction.
+    // This is a MIPS hack to push return address during jalr delay slot.
+    as_addiu(StackPointer, StackPointer, -2 * sizeof(intptr_t));
+    as_jalr(r);
+    as_sw(ra, StackPointer, 0);
+}
+
+// This macroinstruction calls the Ion code and pushes the return address to
+// the stack in the case when the stack is not aligned.
+void
+MacroAssemblerMIPS::ma_callIonHalfPush(const Register r)
+{
+    // Reserve a single word and store the return address from the jalr
+    // delay slot; used when the caller wants a half (one-word) push.
+    // This is a MIPS hack to push return address during jalr delay slot.
+    as_addiu(StackPointer, StackPointer, -sizeof(intptr_t));
+    as_jalr(r);
+    as_sw(ra, StackPointer, 0);
+}
+
+void
+MacroAssemblerMIPS::ma_call(ImmPtr dest)
+{
+ ma_liPatchable(CallReg, dest);
+ as_jalr(CallReg);
+ as_nop();
+}
+
+void
+MacroAssemblerMIPS::ma_jump(ImmPtr dest)
+{
+ ma_liPatchable(ScratchRegister, dest);
+ as_jr(ScratchRegister);
+ as_nop();
+}
+
+void
+MacroAssemblerMIPSCompat::breakpoint()
+{
+ as_break(0);
+}
+
+void
+MacroAssemblerMIPSCompat::ensureDouble(const ValueOperand &source, FloatRegister dest,
+ Label *failure)
+{
+ Label isDouble, done;
+ branchTestDouble(Assembler::Equal, source.typeReg(), &isDouble);
+ branchTestInt32(Assembler::NotEqual, source.typeReg(), failure);
+
+ convertInt32ToDouble(source.payloadReg(), dest);
+ jump(&done);
+
+ bind(&isDouble);
+ unboxDouble(source, dest);
+
+ bind(&done);
+}
+
+void
+MacroAssemblerMIPSCompat::setupABICall(uint32_t args)
+{
+ MOZ_ASSERT(!inCall_);
+ inCall_ = true;
+ args_ = args;
+ passedArgs_ = 0;
+
+ usedArgSlots_ = 0;
+ firstArgType = MoveOp::GENERAL;
+}
+
+void
+MacroAssemblerMIPSCompat::setupAlignedABICall(uint32_t args)
+{
+ setupABICall(args);
+
+ dynamicAlignment_ = false;
+}
+
+void
+MacroAssemblerMIPSCompat::setupUnalignedABICall(uint32_t args, const Register &scratch)
+{
+    // Begin an ABI call from a context whose stack alignment is unknown:
+    // save the old sp in |scratch|, force-align sp, and stash the old sp on
+    // the stack so callWithABIPost() can restore it.
+    setupABICall(args);
+    dynamicAlignment_ = true;
+
+    ma_move(scratch, StackPointer);
+
+    // Force sp to be aligned
+    ma_subu(StackPointer, StackPointer, Imm32(sizeof(uint32_t)));
+    ma_and(StackPointer, StackPointer, Imm32(~(StackAlignment - 1)));
+    as_sw(scratch, StackPointer, 0);
+}
+
+void
+MacroAssemblerMIPSCompat::passABIArg(const MoveOperand &from, MoveOp::Type type)
+{
+    // Schedule one outgoing argument per the MIPS O32 calling convention:
+    // the first two floating-point arguments may travel in f12/f14, a
+    // double consumes two 32-bit argument slots (even-aligned), and
+    // everything else uses the integer argument registers or stack slots.
+    // No code is emitted here; the moves are resolved and emitted later in
+    // callWithABIPre().
+    ++passedArgs_;
+    if (!enoughMemory_)
+        return;
+    switch (type) {
+      case MoveOp::FLOAT32:
+        if (!usedArgSlots_) {
+            // First argument: f12 is the first FP argument register.
+            if (from.floatReg() != f12)
+                enoughMemory_ = moveResolver_.addMove(from, MoveOperand(f12), type);
+            firstArgType = MoveOp::FLOAT32;
+        } else if ((usedArgSlots_ == 1 && firstArgType == MoveOp::FLOAT32) ||
+                   (usedArgSlots_ == 2 && firstArgType == MoveOp::DOUBLE)) {
+            // f14 may be used only when the first argument was also FP.
+            if (from.floatReg() != f14)
+                enoughMemory_ = moveResolver_.addMove(from, MoveOperand(f14), type);
+        } else {
+            // Otherwise the float travels like a GENERAL argument: in an
+            // integer register if one remains, else on the stack.
+            Register destReg;
+            if (GetIntArgReg(usedArgSlots_, &destReg)) {
+                if (from.isGeneralReg() && from.reg() == destReg) {
+                    // Nothing to do. Value is in the right register already
+                } else {
+                    enoughMemory_ = moveResolver_.addMove(from, MoveOperand(destReg), type);
+                }
+            } else {
+                uint32_t disp = GetArgStackDisp(usedArgSlots_);
+                enoughMemory_ = moveResolver_.addMove(from, MoveOperand(sp, disp), type);
+            }
+        }
+        usedArgSlots_++;
+        break;
+      case MoveOp::DOUBLE:
+        if (!usedArgSlots_) {
+            // First argument: f12, occupying argument slots 0 and 1.
+            if (from.floatReg() != f12)
+                enoughMemory_ = moveResolver_.addMove(from, MoveOperand(f12), type);
+            usedArgSlots_ = 2;
+            firstArgType = MoveOp::DOUBLE;
+        } else if (usedArgSlots_ <= 2) {
+            if ((usedArgSlots_ == 1 && firstArgType == MoveOp::FLOAT32) ||
+                (usedArgSlots_ == 2 && firstArgType == MoveOp::DOUBLE)) {
+                // Second FP argument goes to f14 (only after an FP first arg).
+                if (from.floatReg() != f14)
+                    enoughMemory_ = moveResolver_.addMove(from, MoveOperand(f14), type);
+            } else {
+                // Create two moves so that cycles are found. Move emitter
+                // will have special case to handle this.
+                enoughMemory_ = moveResolver_.addMove(from, MoveOperand(a2), type);
+                enoughMemory_ = moveResolver_.addMove(from, MoveOperand(a3), type);
+            }
+            usedArgSlots_ = 4;
+        } else {
+            // Align if necessary
+            usedArgSlots_ += usedArgSlots_ % 2;
+
+            uint32_t disp = GetArgStackDisp(usedArgSlots_);
+            enoughMemory_ = moveResolver_.addMove(from, MoveOperand(sp, disp), type);
+            usedArgSlots_ += 2;
+        }
+        break;
+      case MoveOp::GENERAL:
+        Register destReg;
+        if (GetIntArgReg(usedArgSlots_, &destReg)) {
+            if (from.isGeneralReg() && from.reg() == destReg) {
+                // Nothing to do. Value is in the right register already
+            } else {
+                enoughMemory_ = moveResolver_.addMove(from, MoveOperand(destReg), type);
+            }
+        } else {
+            uint32_t disp = GetArgStackDisp(usedArgSlots_);
+            enoughMemory_ = moveResolver_.addMove(from, MoveOperand(sp, disp), type);
+        }
+        usedArgSlots_++;
+        break;
+      default:
+        MOZ_ASSUME_UNREACHABLE("Unexpected argument type");
+    }
+}
+
+void
+MacroAssemblerMIPSCompat::passABIArg(const Register &reg)
+{
+    // Convenience overload: pass a general-purpose register argument.
+    passABIArg(MoveOperand(reg), MoveOp::GENERAL);
+}
+
+void
+MacroAssemblerMIPSCompat::passABIArg(const FloatRegister &freg, MoveOp::Type type)
+{
+ passABIArg(MoveOperand(freg), type);
+}
+
+void MacroAssemblerMIPSCompat::checkStackAlignment()
+{
+#ifdef DEBUG
+ Label aligned;
+ as_andi(ScratchRegister, sp, StackAlignment - 1);
+ ma_b(ScratchRegister, zero, &aligned, Equal, ShortJump);
+ as_break(MAX_BREAK_CODE);
+ bind(&aligned);
+#endif
+}
+
+void
+MacroAssemblerMIPSCompat::alignPointerUp(Register src, Register dest, uint32_t alignment)
+{
+ MOZ_ASSERT(alignment > 1);
+ ma_addu(dest, src, Imm32(alignment - 1));
+ ma_and(dest, dest, Imm32(~(alignment - 1)));
+}
+
+void
+MacroAssemblerMIPSCompat::callWithABIPre(uint32_t *stackAdjust)
+{
+    // Compute and apply the stack adjustment for an ABI call, then emit the
+    // argument moves scheduled by passABIArg(). The adjustment is returned
+    // through |*stackAdjust| so callWithABIPost() can undo it.
+    MOZ_ASSERT(inCall_);
+
+    // Reserve place for $ra.
+    *stackAdjust = sizeof(intptr_t);
+
+    // O32 always reserves home space for the four integer argument
+    // registers, even when fewer argument slots are used.
+    *stackAdjust += usedArgSlots_ > NumIntArgRegs ?
+                    usedArgSlots_ * sizeof(intptr_t) :
+                    NumIntArgRegs * sizeof(intptr_t);
+
+    if (dynamicAlignment_) {
+        // sp was already forced to alignment in setupUnalignedABICall().
+        *stackAdjust += ComputeByteAlignment(*stackAdjust, StackAlignment);
+    } else {
+        *stackAdjust += ComputeByteAlignment(framePushed_ + *stackAdjust, StackAlignment);
+    }
+
+    reserveStack(*stackAdjust);
+
+    // Save $ra because call is going to clobber it. Restore it in
+    // callWithABIPost. NOTE: This is needed for calls from BaselineIC.
+    // Maybe we can do this differently.
+    ma_sw(ra, Address(StackPointer, *stackAdjust - sizeof(intptr_t)));
+
+    // Position all arguments.
+    {
+        enoughMemory_ = enoughMemory_ && moveResolver_.resolve();
+        if (!enoughMemory_)
+            return;
+
+        MoveEmitter emitter(*this);
+        emitter.emit(moveResolver_);
+        emitter.finish();
+    }
+
+    checkStackAlignment();
+}
+
+void
+MacroAssemblerMIPSCompat::callWithABIPost(uint32_t stackAdjust, MoveOp::Type result)
+{
+    // Undo callWithABIPre(): restore $ra and release the reserved stack.
+    // Restore ra value (as stored in callWithABIPre()).
+    ma_lw(ra, Address(StackPointer, stackAdjust - sizeof(intptr_t)));
+
+    if (dynamicAlignment_) {
+        // Restore sp value from stack (as stored in setupUnalignedABICall()).
+        ma_lw(StackPointer, Address(StackPointer, stackAdjust));
+        // Use adjustFrame instead of freeStack because we already restored sp.
+        adjustFrame(-stackAdjust);
+    } else {
+        freeStack(stackAdjust);
+    }
+
+    MOZ_ASSERT(inCall_);
+    inCall_ = false;
+}
+
+// Perform a native ABI call to a fixed C/C++ function address.
+void
+MacroAssemblerMIPSCompat::callWithABI(void *fun, MoveOp::Type result)
+{
+ uint32_t stackAdjust;
+ callWithABIPre(&stackAdjust);
+ ma_call(ImmPtr(fun));
+ callWithABIPost(stackAdjust, result);
+}
+
+// Perform a native ABI call to an AsmJS patchable immediate target.
+void
+MacroAssemblerMIPSCompat::callWithABI(AsmJSImmPtr imm, MoveOp::Type result)
+{
+ uint32_t stackAdjust;
+ callWithABIPre(&stackAdjust);
+ call(imm);
+ callWithABIPost(stackAdjust, result);
+}
+
+// Perform a native ABI call through a function pointer loaded from
+// memory. The callee address is loaded into t9 *before* the argument
+// moves in callWithABIPre, because fun.base may be an IntArg register
+// that those moves clobber.
+void
+MacroAssemblerMIPSCompat::callWithABI(const Address &fun, MoveOp::Type result)
+{
+ // Load the callee in t9, no instruction between the lw and call
+ // should clobber it. Note that we can't use fun.base because it may
+ // be one of the IntArg registers clobbered before the call.
+ ma_lw(t9, Address(fun.base, fun.offset));
+ uint32_t stackAdjust;
+ callWithABIPre(&stackAdjust);
+ call(t9);
+ callWithABIPost(stackAdjust, result);
+
+}
+
+// Entered from JIT code when an exception is in flight: carve out an
+// aligned ResumeFromException record on the stack, hand its address to
+// the C++ 'handler', then jump to the exception tail stub which
+// resumes execution according to the populated record.
+void
+MacroAssemblerMIPSCompat::handleFailureWithHandler(void *handler)
+{
+ // Reserve space for exception information.
+ // NOTE(review): this rounds up but adds a full StackAlignment when
+ // the size is already aligned — over-reserves, still correctly aligned.
+ int size = (sizeof(ResumeFromException) + StackAlignment) & ~(StackAlignment - 1);
+ ma_subu(StackPointer, StackPointer, Imm32(size));
+ ma_move(a0, StackPointer); // Use a0 since it is a first function argument
+
+ // Ask for an exception handler.
+ setupUnalignedABICall(1, a1);
+ passABIArg(a0);
+ callWithABI(handler);
+
+ JitCode *excTail = GetIonContext()->runtime->jitRuntime()->getExceptionTail();
+ branch(excTail);
+}
+
+// Exception tail stub body: dispatch on the ResumeFromException::kind
+// filled in by the C++ handler and resume accordingly — return an
+// error from the entry frame, enter a baseline catch or finally block,
+// perform a forced return, or bail out to Baseline.
+void
+MacroAssemblerMIPSCompat::handleFailureWithHandlerTail()
+{
+ Label entryFrame;
+ Label catch_;
+ Label finally;
+ Label return_;
+ Label bailout;
+
+ // Already clobbered a0, so use it...
+ ma_lw(a0, Address(StackPointer, offsetof(ResumeFromException, kind)));
+ branch32(Assembler::Equal, a0, Imm32(ResumeFromException::RESUME_ENTRY_FRAME), &entryFrame);
+ branch32(Assembler::Equal, a0, Imm32(ResumeFromException::RESUME_CATCH), &catch_);
+ branch32(Assembler::Equal, a0, Imm32(ResumeFromException::RESUME_FINALLY), &finally);
+ branch32(Assembler::Equal, a0, Imm32(ResumeFromException::RESUME_FORCED_RETURN), &return_);
+ branch32(Assembler::Equal, a0, Imm32(ResumeFromException::RESUME_BAILOUT), &bailout);
+
+ breakpoint(); // Invalid kind.
+
+ // No exception handler. Load the error value, load the new stack pointer
+ // and return from the entry frame.
+ bind(&entryFrame);
+ moveValue(MagicValue(JS_ION_ERROR), JSReturnOperand);
+ ma_lw(StackPointer, Address(StackPointer, offsetof(ResumeFromException, stackPointer)));
+
+ // We're going to be returning by the ion calling convention
+ ma_pop(ra);
+ as_jr(ra);
+ as_nop();
+
+ // If we found a catch handler, this must be a baseline frame. Restore
+ // state and jump to the catch block.
+ bind(&catch_);
+ ma_lw(a0, Address(StackPointer, offsetof(ResumeFromException, target)));
+ ma_lw(BaselineFrameReg, Address(StackPointer, offsetof(ResumeFromException, framePointer)));
+ ma_lw(StackPointer, Address(StackPointer, offsetof(ResumeFromException, stackPointer)));
+ jump(a0);
+
+ // If we found a finally block, this must be a baseline frame. Push
+ // two values expected by JSOP_RETSUB: BooleanValue(true) and the
+ // exception.
+ bind(&finally);
+ ValueOperand exception = ValueOperand(a1, a2);
+ loadValue(Address(sp, offsetof(ResumeFromException, exception)), exception);
+
+ ma_lw(a0, Address(sp, offsetof(ResumeFromException, target)));
+ ma_lw(BaselineFrameReg, Address(sp, offsetof(ResumeFromException, framePointer)));
+ ma_lw(sp, Address(sp, offsetof(ResumeFromException, stackPointer)));
+
+ pushValue(BooleanValue(true));
+ pushValue(exception);
+ jump(a0);
+
+ // Only used in debug mode. Return BaselineFrame->returnValue() to the
+ // caller.
+ bind(&return_);
+ ma_lw(BaselineFrameReg, Address(StackPointer, offsetof(ResumeFromException, framePointer)));
+ ma_lw(StackPointer, Address(StackPointer, offsetof(ResumeFromException, stackPointer)));
+ loadValue(Address(BaselineFrameReg, BaselineFrame::reverseOffsetOfReturnValue()),
+ JSReturnOperand);
+ ma_move(StackPointer, BaselineFrameReg);
+ pop(BaselineFrameReg);
+ ret();
+
+ // If we are bailing out to baseline to handle an exception, jump to
+ // the bailout tail stub.
+ bind(&bailout);
+ ma_lw(a2, Address(sp, offsetof(ResumeFromException, bailoutInfo)));
+ ma_li(ReturnReg, Imm32(BAILOUT_RETURN_OK));
+ ma_lw(a1, Address(sp, offsetof(ResumeFromException, target)));
+ jump(a1);
+}
+
+// Emit a jump whose code offset is recorded so it can later be
+// toggled between a jump and a non-operation (see the header comment
+// on toggledJump: ToggleToJmp()/ToggleToCmp() patch it via "andi").
+CodeOffsetLabel
+MacroAssemblerMIPSCompat::toggledJump(Label *label)
+{
+ CodeOffsetLabel ret(nextOffset().getOffset());
+ ma_b(label);
+ return ret;
+}
+
+// Emit a patchable call site: a patchable load of the target address
+// followed by either jalr+nop (enabled) or two nops (disabled).
+// ToggleCall() flips between the two forms; the emitted size must
+// equal ToggledCallSize(), which the assert below checks.
+CodeOffsetLabel
+MacroAssemblerMIPSCompat::toggledCall(JitCode *target, bool enabled)
+{
+ BufferOffset bo = nextOffset();
+ CodeOffsetLabel offset(bo.getOffset());
+ addPendingJump(bo, ImmPtr(target->raw()), Relocation::JITCODE);
+ ma_liPatchable(ScratchRegister, ImmPtr(target->raw()));
+ if (enabled) {
+ as_jalr(ScratchRegister);
+ as_nop();
+ } else {
+ as_nop();
+ as_nop();
+ }
+ MOZ_ASSERT(nextOffset().getOffset() - offset.offset() == ToggledCallSize());
+ return offset;
+}
+
+// Branch to 'label' if ptr points into the GC nursery. Adds the
+// negated nursery start to ptr and takes the branch when the unsigned
+// result is below NurserySize, i.e. ptr is in [start, start + size).
+void
+MacroAssemblerMIPSCompat::branchPtrInNurseryRange(Register ptr, Register temp, Label *label)
+{
+ JS_ASSERT(temp != InvalidReg);
+ const Nursery &nursery = GetIonContext()->runtime->gcNursery();
+
+ // ptr and temp may be the same register, in which case we mustn't trash it
+ // before we use its contents.
+ if (ptr == temp) {
+ addPtr(ImmWord(-ptrdiff_t(nursery.start())), ptr);
+ branchPtr(Assembler::Below, ptr, Imm32(Nursery::NurserySize), label);
+ } else {
+ movePtr(ImmWord(-ptrdiff_t(nursery.start())), temp);
+ addPtr(ptr, temp);
+ branchPtr(Assembler::Below, temp, Imm32(Nursery::NurserySize), label);
+ }
+}
diff --git a/src/third_party/mozjs/js/src/jit/mips/MacroAssembler-mips.h b/src/third_party/mozjs/js/src/jit/mips/MacroAssembler-mips.h
new file mode 100644
index 0000000..ed1f566
--- /dev/null
+++ b/src/third_party/mozjs/js/src/jit/mips/MacroAssembler-mips.h
@@ -0,0 +1,1156 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_mips_MacroAssembler_mips_h
+#define jit_mips_MacroAssembler_mips_h
+
+#include "jsopcode.h"
+
+#include "jit/IonCaches.h"
+#include "jit/IonFrames.h"
+#include "jit/mips/Assembler-mips.h"
+#include "jit/MoveResolver.h"
+
+namespace js {
+namespace jit {
+
+// Width of a memory access, in bits, for the ma_load/ma_store helpers.
+enum LoadStoreSize
+{
+ SizeByte = 8,
+ SizeHalfWord = 16,
+ SizeWord = 32,
+ SizeDouble = 64
+};
+
+// Whether a sub-word load zero- or sign-extends into the register.
+enum LoadStoreExtension
+{
+ ZeroExtend = 0,
+ SignExtend = 1
+};
+
+// Branch range selector for ma_b and friends: ShortJump emits a single
+// PC-relative branch; LongJump can reach any target in the buffer.
+enum JumpKind
+{
+ LongJump = 0,
+ ShortJump = 1
+};
+
+// Immediate wrapper for a JS value tag (the type word of a jsval).
+struct ImmTag : public Imm32
+{
+ ImmTag(JSValueTag mask)
+ : Imm32(int32_t(mask))
+ { }
+};
+
+// Immediate for a JSValueType, converted to its corresponding tag.
+struct ImmType : public ImmTag
+{
+ ImmType(JSValueType type)
+ : ImmTag(JSVAL_TYPE_TO_TAG(type))
+ { }
+};
+
+// Register pair through which JIT code returns a JS Value.
+static const ValueOperand JSReturnOperand = ValueOperand(JSReturnReg_Type, JSReturnReg_Data);
+// Return location for doubles under the soft-float ABI (v1:v0 pair).
+static const ValueOperand softfpReturnOperand = ValueOperand(v1, v0);
+
+// t9 must hold the callee address at call time (MIPS PIC convention).
+static Register CallReg = t9;
+// log2(sizeof(jsval)): scale used when indexing arrays of Values.
+static const int defaultShift = 3;
+static_assert(1 << defaultShift == sizeof(jsval), "The defaultShift is wrong");
+
+// Low-level MIPS macro assembler: extends the raw Assembler with
+// multi-instruction helpers (the ma_* family) for immediates,
+// loads/stores, branches and floating point, plus type conversions.
+// It knows nothing about JS Values; that layer lives in
+// MacroAssemblerMIPSCompat below.
+class MacroAssemblerMIPS : public Assembler
+{
+ public:
+
+ // Conversions between primitive numeric types; the branchTruncate*
+ // and convert*ToInt32 variants branch to 'fail' on inexact results.
+ void convertBoolToInt32(Register source, Register dest);
+ void convertInt32ToDouble(const Register &src, const FloatRegister &dest);
+ void convertInt32ToDouble(const Address &src, FloatRegister dest);
+ void convertUInt32ToDouble(const Register &src, const FloatRegister &dest);
+ void convertUInt32ToFloat32(const Register &src, const FloatRegister &dest);
+ void convertDoubleToFloat32(const FloatRegister &src, const FloatRegister &dest);
+ void branchTruncateDouble(const FloatRegister &src, const Register &dest, Label *fail);
+ void convertDoubleToInt32(const FloatRegister &src, const Register &dest, Label *fail,
+ bool negativeZeroCheck = true);
+ void convertFloat32ToInt32(const FloatRegister &src, const Register &dest, Label *fail,
+ bool negativeZeroCheck = true);
+
+ void convertFloat32ToDouble(const FloatRegister &src, const FloatRegister &dest);
+ void branchTruncateFloat32(const FloatRegister &src, const Register &dest, Label *fail);
+ void convertInt32ToFloat32(const Register &src, const FloatRegister &dest);
+ void convertInt32ToFloat32(const Address &src, FloatRegister dest);
+
+
+ void addDouble(FloatRegister src, FloatRegister dest);
+ void subDouble(FloatRegister src, FloatRegister dest);
+ void mulDouble(FloatRegister src, FloatRegister dest);
+ void divDouble(FloatRegister src, FloatRegister dest);
+
+ void negateDouble(FloatRegister reg);
+ void inc64(AbsoluteAddress dest);
+
+ public:
+
+ void ma_move(Register rd, Register rs);
+
+ // Load an immediate / label address into a register (ma_liPatchable
+ // always emits the full-width sequence so it can be patched later).
+ void ma_li(Register dest, const ImmGCPtr &ptr);
+
+ void ma_li(const Register &dest, AbsoluteLabel *label);
+
+ void ma_li(Register dest, Imm32 imm);
+ void ma_liPatchable(Register dest, Imm32 imm);
+ void ma_liPatchable(Register dest, ImmPtr imm);
+
+ // Shift operations
+ void ma_sll(Register rd, Register rt, Imm32 shift);
+ void ma_srl(Register rd, Register rt, Imm32 shift);
+ void ma_sra(Register rd, Register rt, Imm32 shift);
+ void ma_ror(Register rd, Register rt, Imm32 shift);
+ void ma_rol(Register rd, Register rt, Imm32 shift);
+
+ void ma_sll(Register rd, Register rt, Register shift);
+ void ma_srl(Register rd, Register rt, Register shift);
+ void ma_sra(Register rd, Register rt, Register shift);
+ void ma_ror(Register rd, Register rt, Register shift);
+ void ma_rol(Register rd, Register rt, Register shift);
+
+ // Negate
+ void ma_negu(Register rd, Register rs);
+
+ void ma_not(Register rd, Register rs);
+
+ // and
+ void ma_and(Register rd, Register rs);
+ void ma_and(Register rd, Register rs, Register rt);
+ void ma_and(Register rd, Imm32 imm);
+ void ma_and(Register rd, Register rs, Imm32 imm);
+
+ // or
+ void ma_or(Register rd, Register rs);
+ void ma_or(Register rd, Register rs, Register rt);
+ void ma_or(Register rd, Imm32 imm);
+ void ma_or(Register rd, Register rs, Imm32 imm);
+
+ // xor
+ void ma_xor(Register rd, Register rs);
+ void ma_xor(Register rd, Register rs, Register rt);
+ void ma_xor(Register rd, Imm32 imm);
+ void ma_xor(Register rd, Register rs, Imm32 imm);
+
+ // load
+ void ma_load(const Register &dest, Address address, LoadStoreSize size = SizeWord,
+ LoadStoreExtension extension = SignExtend);
+ void ma_load(const Register &dest, const BaseIndex &src, LoadStoreSize size = SizeWord,
+ LoadStoreExtension extension = SignExtend);
+
+ // store
+ void ma_store(const Register &data, Address address, LoadStoreSize size = SizeWord,
+ LoadStoreExtension extension = SignExtend);
+ void ma_store(const Register &data, const BaseIndex &dest, LoadStoreSize size = SizeWord,
+ LoadStoreExtension extension = SignExtend);
+ void ma_store(const Imm32 &imm, const BaseIndex &dest, LoadStoreSize size = SizeWord,
+ LoadStoreExtension extension = SignExtend);
+
+ // dest = base + index * scale (from the BaseIndex), offset excluded.
+ void computeScaledAddress(const BaseIndex &address, Register dest);
+
+ void computeEffectiveAddress(const Address &address, Register dest) {
+ ma_addu(dest, address.base, Imm32(address.offset));
+ }
+
+ void computeEffectiveAddress(const BaseIndex &address, Register dest) {
+ computeScaledAddress(address, dest);
+ if (address.offset) {
+ ma_addu(dest, dest, Imm32(address.offset));
+ }
+ }
+
+ // arithmetic based ops
+ // add
+ void ma_addu(Register rd, Register rs, Imm32 imm);
+ void ma_addu(Register rd, Register rs);
+ void ma_addu(Register rd, Imm32 imm);
+ void ma_addTestOverflow(Register rd, Register rs, Register rt, Label *overflow);
+ void ma_addTestOverflow(Register rd, Register rs, Imm32 imm, Label *overflow);
+
+ // subtract
+ void ma_subu(Register rd, Register rs, Register rt);
+ void ma_subu(Register rd, Register rs, Imm32 imm);
+ void ma_subu(Register rd, Imm32 imm);
+ void ma_subTestOverflow(Register rd, Register rs, Register rt, Label *overflow);
+ void ma_subTestOverflow(Register rd, Register rs, Imm32 imm, Label *overflow);
+
+ // multiplies. For now, there are only few that we care about.
+ void ma_mult(Register rs, Imm32 imm);
+ void ma_mul_branch_overflow(Register rd, Register rs, Register rt, Label *overflow);
+ void ma_mul_branch_overflow(Register rd, Register rs, Imm32 imm, Label *overflow);
+
+ // divisions
+ void ma_div_branch_overflow(Register rd, Register rs, Register rt, Label *overflow);
+ void ma_div_branch_overflow(Register rd, Register rs, Imm32 imm, Label *overflow);
+
+ // fast mod, uses scratch registers, and thus needs to be in the assembler
+ // implicitly assumes that we can overwrite dest at the beginning of the sequence
+ void ma_mod_mask(Register src, Register dest, Register hold, int32_t shift,
+ Label *negZero = nullptr);
+
+ // memory
+ // shortcut for when we know we're transferring 32 bits of data
+ void ma_lw(Register data, Address address);
+
+ void ma_sw(Register data, Address address);
+ void ma_sw(Imm32 imm, Address address);
+
+ void ma_pop(Register r);
+ void ma_push(Register r);
+
+ // branches when done from within mips-specific code
+ void ma_b(Register lhs, Register rhs, Label *l, Condition c, JumpKind jumpKind = LongJump);
+ void ma_b(Register lhs, Imm32 imm, Label *l, Condition c, JumpKind jumpKind = LongJump);
+ void ma_b(Register lhs, Address addr, Label *l, Condition c, JumpKind jumpKind = LongJump);
+ void ma_b(Address addr, Imm32 imm, Label *l, Condition c, JumpKind jumpKind = LongJump);
+ void ma_b(Label *l, JumpKind jumpKind = LongJump);
+ void ma_bal(Label *l, JumpKind jumpKind = LongJump);
+
+ // fp instructions
+ void ma_lis(FloatRegister dest, float value);
+ void ma_lid(FloatRegister dest, double value);
+ void ma_liNegZero(FloatRegister dest);
+
+ void ma_mv(FloatRegister src, ValueOperand dest);
+ void ma_mv(ValueOperand src, FloatRegister dest);
+
+ void ma_ls(FloatRegister fd, Address address);
+ void ma_ld(FloatRegister fd, Address address);
+ void ma_sd(FloatRegister fd, Address address);
+ void ma_sd(FloatRegister fd, BaseIndex address);
+ void ma_ss(FloatRegister fd, Address address);
+ void ma_ss(FloatRegister fd, BaseIndex address);
+
+ void ma_pop(FloatRegister fs);
+ void ma_push(FloatRegister fs);
+
+ //FP branches
+ void ma_bc1s(FloatRegister lhs, FloatRegister rhs, Label *label, DoubleCondition c,
+ JumpKind jumpKind = LongJump, FPConditionBit fcc = FCC0);
+ void ma_bc1d(FloatRegister lhs, FloatRegister rhs, Label *label, DoubleCondition c,
+ JumpKind jumpKind = LongJump, FPConditionBit fcc = FCC0);
+
+
+ // These functions abstract the access to high part of the double precision
+ // float register. It is intended to work on both 32 bit and 64 bit
+ // floating point coprocessor.
+ // :TODO: (Bug 985881) Modify this for N32 ABI to use mthc1 and mfhc1
+ void moveToDoubleHi(Register src, FloatRegister dest) {
+ as_mtc1(src, getOddPair(dest));
+ }
+ void moveFromDoubleHi(FloatRegister src, Register dest) {
+ as_mfc1(dest, getOddPair(src));
+ }
+
+ void moveToDoubleLo(Register src, FloatRegister dest) {
+ as_mtc1(src, dest);
+ }
+ void moveFromDoubleLo(FloatRegister src, Register dest) {
+ as_mfc1(dest, src);
+ }
+
+ void moveToFloat32(Register src, FloatRegister dest) {
+ as_mtc1(src, dest);
+ }
+ void moveFromFloat32(FloatRegister src, Register dest) {
+ as_mfc1(dest, src);
+ }
+
+ protected:
+ void branchWithCode(InstImm code, Label *label, JumpKind jumpKind);
+ Condition ma_cmp(Register rd, Register lhs, Register rhs, Condition c);
+
+ void compareFloatingPoint(FloatFormat fmt, FloatRegister lhs, FloatRegister rhs,
+ DoubleCondition c, FloatTestKind *testKind,
+ FPConditionBit fcc = FCC0);
+
+ public:
+ // calls an Ion function, assumes that the stack is untouched (8 byte aligned)
+ void ma_callIon(const Register reg);
+ // calls an Ion function, assuming that sp has already been decremented
+ void ma_callIonNoPush(const Register reg);
+ // calls an ion function, assuming that the stack is currently not 8 byte aligned
+ void ma_callIonHalfPush(const Register reg);
+
+ void ma_call(ImmPtr dest);
+
+ void ma_jump(ImmPtr dest);
+
+ // dst = 1 if the comparison holds, else 0.
+ void ma_cmp_set(Register dst, Register lhs, Register rhs, Condition c);
+ void ma_cmp_set(Register dst, Register lhs, Imm32 imm, Condition c);
+ void ma_cmp_set(Register rd, Register rs, Address addr, Condition c);
+ void ma_cmp_set(Register dst, Address lhs, Register imm, Condition c);
+ void ma_cmp_set_double(Register dst, FloatRegister lhs, FloatRegister rhs, DoubleCondition c);
+ void ma_cmp_set_float32(Register dst, FloatRegister lhs, FloatRegister rhs, DoubleCondition c);
+};
+
+class MacroAssemblerMIPSCompat : public MacroAssemblerMIPS
+{
+ // Number of bytes the stack is adjusted inside a call to C. Calls to C may
+ // not be nested.
+ bool inCall_;
+ uint32_t args_;
+ // The actual number of arguments that were passed, used to assert that
+ // the initial number of arguments declared was correct.
+ uint32_t passedArgs_;
+
+ uint32_t usedArgSlots_;
+ MoveOp::Type firstArgType;
+
+ bool dynamicAlignment_;
+
+ bool enoughMemory_;
+ // Compute space needed for the function call and set the properties of the
+ // callee. It returns the space which has to be allocated for calling the
+ // function.
+ //
+ // arg Number of arguments of the function.
+ void setupABICall(uint32_t arg);
+
+ protected:
+ MoveResolver moveResolver_;
+
+ // Extra bytes currently pushed onto the frame beyond frameDepth_. This is
+ // needed to compute offsets to stack slots while temporary space has been
+ // reserved for unexpected spills or C++ function calls. It is maintained
+ // by functions which track stack alignment, which for clear distinction
+ // use StudlyCaps (for example, Push, Pop).
+ uint32_t framePushed_;
+ void adjustFrame(int value) {
+ setFramePushed(framePushed_ + value);
+ }
+ public:
+ MacroAssemblerMIPSCompat()
+ : inCall_(false),
+ enoughMemory_(true),
+ framePushed_(0)
+ { }
+ bool oom() const {
+ return Assembler::oom();
+ }
+
+ public:
+ using MacroAssemblerMIPS::call;
+
+ void j(Label *dest) {
+ ma_b(dest);
+ }
+
+ void mov(Register src, Register dest) {
+ as_or(dest, src, zero);
+ }
+ void mov(ImmWord imm, Register dest) {
+ ma_li(dest, Imm32(imm.value));
+ }
+ void mov(ImmPtr imm, Register dest) {
+ mov(ImmWord(uintptr_t(imm.value)), dest);
+ }
+ void mov(Register src, Address dest) {
+ MOZ_ASSUME_UNREACHABLE("NYI-IC");
+ }
+ void mov(Address src, Register dest) {
+ MOZ_ASSUME_UNREACHABLE("NYI-IC");
+ }
+
+ void call(const Register reg) {
+ as_jalr(reg);
+ as_nop();
+ }
+
+ void call(Label *label) {
+ // for now, assume that it'll be nearby?
+ ma_bal(label);
+ }
+
+ void call(ImmWord imm) {
+ call(ImmPtr((void*)imm.value));
+ }
+ void call(ImmPtr imm) {
+ BufferOffset bo = m_buffer.nextOffset();
+ addPendingJump(bo, imm, Relocation::HARDCODED);
+ ma_call(imm);
+ }
+ void call(AsmJSImmPtr imm) {
+ movePtr(imm, CallReg);
+ call(CallReg);
+ }
+ void call(JitCode *c) {
+ BufferOffset bo = m_buffer.nextOffset();
+ addPendingJump(bo, ImmPtr(c->raw()), Relocation::JITCODE);
+ ma_liPatchable(ScratchRegister, Imm32((uint32_t)c->raw()));
+ ma_callIonHalfPush(ScratchRegister);
+ }
+ void branch(JitCode *c) {
+ BufferOffset bo = m_buffer.nextOffset();
+ addPendingJump(bo, ImmPtr(c->raw()), Relocation::JITCODE);
+ ma_liPatchable(ScratchRegister, Imm32((uint32_t)c->raw()));
+ as_jr(ScratchRegister);
+ as_nop();
+ }
+ void branch(const Register reg) {
+ as_jr(reg);
+ as_nop();
+ }
+ void nop() {
+ as_nop();
+ }
+ void ret() {
+ ma_pop(ra);
+ as_jr(ra);
+ as_nop();
+ }
+ void retn(Imm32 n) {
+ // pc <- [sp]; sp += n
+ ma_lw(ra, Address(StackPointer, 0));
+ ma_addu(StackPointer, StackPointer, n);
+ as_jr(ra);
+ as_nop();
+ }
+ void push(Imm32 imm) {
+ ma_li(ScratchRegister, imm);
+ ma_push(ScratchRegister);
+ }
+ void push(ImmWord imm) {
+ ma_li(ScratchRegister, Imm32(imm.value));
+ ma_push(ScratchRegister);
+ }
+ void push(ImmGCPtr imm) {
+ ma_li(ScratchRegister, imm);
+ ma_push(ScratchRegister);
+ }
+ void push(const Address &address) {
+ ma_lw(ScratchRegister, address);
+ ma_push(ScratchRegister);
+ }
+ void push(const Register ®) {
+ ma_push(reg);
+ }
+ void push(const FloatRegister ®) {
+ ma_push(reg);
+ }
+ void pop(const Register ®) {
+ ma_pop(reg);
+ }
+ void pop(const FloatRegister ®) {
+ ma_pop(reg);
+ }
+
+ // Emit a branch that can be toggled to a non-operation. On MIPS we use
+ // "andi" instruction to toggle the branch.
+ // See ToggleToJmp(), ToggleToCmp().
+ CodeOffsetLabel toggledJump(Label *label);
+
+ // Emit a "jalr" or "nop" instruction. ToggleCall can be used to patch
+ // this instruction.
+ CodeOffsetLabel toggledCall(JitCode *target, bool enabled);
+
+ static size_t ToggledCallSize() {
+ // Four instructions used in: MacroAssemblerMIPSCompat::toggledCall
+ return 4 * sizeof(uint32_t);
+ }
+
+ CodeOffsetLabel pushWithPatch(ImmWord imm) {
+ CodeOffsetLabel label = movWithPatch(imm, ScratchRegister);
+ ma_push(ScratchRegister);
+ return label;
+ }
+
+ CodeOffsetLabel movWithPatch(ImmWord imm, Register dest) {
+ CodeOffsetLabel label = currentOffset();
+ ma_liPatchable(dest, Imm32(imm.value));
+ return label;
+ }
+ CodeOffsetLabel movWithPatch(ImmPtr imm, Register dest) {
+ return movWithPatch(ImmWord(uintptr_t(imm.value)), dest);
+ }
+
+ void jump(Label *label) {
+ ma_b(label);
+ }
+ void jump(Register reg) {
+ as_jr(reg);
+ as_nop();
+ }
+ void jump(const Address &address) {
+ ma_lw(ScratchRegister, address);
+ as_jr(ScratchRegister);
+ as_nop();
+ }
+
+ void neg32(Register reg) {
+ ma_negu(reg, reg);
+ }
+ void negl(Register reg) {
+ ma_negu(reg, reg);
+ }
+
+ // Returns the register containing the type tag.
+ Register splitTagForTest(const ValueOperand &value) {
+ return value.typeReg();
+ }
+
+ void branchTestGCThing(Condition cond, const Address &address, Label *label);
+ void branchTestGCThing(Condition cond, const BaseIndex &src, Label *label);
+
+ void branchTestPrimitive(Condition cond, const ValueOperand &value, Label *label);
+ void branchTestPrimitive(Condition cond, const Register &tag, Label *label);
+
+ void branchTestValue(Condition cond, const ValueOperand &value, const Value &v, Label *label);
+ void branchTestValue(Condition cond, const Address &valaddr, const ValueOperand &value,
+ Label *label);
+
+ // unboxing code
+ void unboxInt32(const ValueOperand &operand, const Register &dest);
+ void unboxInt32(const Address &src, const Register &dest);
+ void unboxBoolean(const ValueOperand &operand, const Register &dest);
+ void unboxBoolean(const Address &src, const Register &dest);
+ void unboxDouble(const ValueOperand &operand, const FloatRegister &dest);
+ void unboxDouble(const Address &src, const FloatRegister &dest);
+ void unboxString(const ValueOperand &operand, const Register &dest);
+ void unboxString(const Address &src, const Register &dest);
+ void unboxObject(const ValueOperand &src, const Register &dest);
+ void unboxValue(const ValueOperand &src, AnyRegister dest);
+ void unboxPrivate(const ValueOperand &src, Register dest);
+
+ void notBoolean(const ValueOperand &val) {
+ as_xori(val.payloadReg(), val.payloadReg(), 1);
+ }
+
+ // boxing code
+ void boxDouble(const FloatRegister &src, const ValueOperand &dest);
+ void boxNonDouble(JSValueType type, const Register &src, const ValueOperand &dest);
+
+ // Extended unboxing API. If the payload is already in a register, returns
+ // that register. Otherwise, provides a move to the given scratch register,
+ // and returns that.
+ Register extractObject(const Address &address, Register scratch);
+ Register extractObject(const ValueOperand &value, Register scratch) {
+ return value.payloadReg();
+ }
+ Register extractInt32(const ValueOperand &value, Register scratch) {
+ return value.payloadReg();
+ }
+ Register extractBoolean(const ValueOperand &value, Register scratch) {
+ return value.payloadReg();
+ }
+ Register extractTag(const Address &address, Register scratch);
+ Register extractTag(const BaseIndex &address, Register scratch);
+ Register extractTag(const ValueOperand &value, Register scratch) {
+ return value.typeReg();
+ }
+
+ void boolValueToDouble(const ValueOperand &operand, const FloatRegister &dest);
+ void int32ValueToDouble(const ValueOperand &operand, const FloatRegister &dest);
+ void loadInt32OrDouble(const Address &address, const FloatRegister &dest);
+ void loadInt32OrDouble(Register base, Register index,
+ const FloatRegister &dest, int32_t shift = defaultShift);
+ void loadConstantDouble(double dp, const FloatRegister &dest);
+
+ void boolValueToFloat32(const ValueOperand &operand, const FloatRegister &dest);
+ void int32ValueToFloat32(const ValueOperand &operand, const FloatRegister &dest);
+ void loadConstantFloat32(float f, const FloatRegister &dest);
+
+ void branchTestInt32(Condition cond, const ValueOperand &value, Label *label);
+ void branchTestInt32(Condition cond, const Register &tag, Label *label);
+ void branchTestInt32(Condition cond, const Address &address, Label *label);
+ void branchTestInt32(Condition cond, const BaseIndex &src, Label *label);
+
+ void branchTestBoolean(Condition cond, const ValueOperand &value, Label *label);
+ void branchTestBoolean(Condition cond, const Register &tag, Label *label);
+ void branchTestBoolean(Condition cond, const BaseIndex &src, Label *label);
+
+ void branch32(Condition cond, Register lhs, Register rhs, Label *label) {
+ ma_b(lhs, rhs, label, cond);
+ }
+ void branch32(Condition cond, Register lhs, Imm32 imm, Label *label) {
+ ma_b(lhs, imm, label, cond);
+ }
+ void branch32(Condition cond, const Operand &lhs, Register rhs, Label *label) {
+ if (lhs.getTag() == Operand::REG) {
+ ma_b(lhs.toReg(), rhs, label, cond);
+ } else {
+ branch32(cond, lhs.toAddress(), rhs, label);
+ }
+ }
+ void branch32(Condition cond, const Operand &lhs, Imm32 rhs, Label *label) {
+ if (lhs.getTag() == Operand::REG) {
+ ma_b(lhs.toReg(), rhs, label, cond);
+ } else {
+ branch32(cond, lhs.toAddress(), rhs, label);
+ }
+ }
+ void branch32(Condition cond, const Address &lhs, Register rhs, Label *label) {
+ ma_lw(ScratchRegister, lhs);
+ ma_b(ScratchRegister, rhs, label, cond);
+ }
+ void branch32(Condition cond, const Address &lhs, Imm32 rhs, Label *label) {
+ ma_lw(SecondScratchReg, lhs);
+ ma_b(SecondScratchReg, rhs, label, cond);
+ }
+ void branchPtr(Condition cond, const Address &lhs, Register rhs, Label *label) {
+ branch32(cond, lhs, rhs, label);
+ }
+
+ void branchPrivatePtr(Condition cond, const Address &lhs, ImmPtr ptr, Label *label) {
+ branchPtr(cond, lhs, ptr, label);
+ }
+
+ void branchPrivatePtr(Condition cond, const Address &lhs, Register ptr, Label *label) {
+ branchPtr(cond, lhs, ptr, label);
+ }
+
+ void branchPrivatePtr(Condition cond, Register lhs, ImmWord ptr, Label *label) {
+ branchPtr(cond, lhs, ptr, label);
+ }
+
+ void branchTestDouble(Condition cond, const ValueOperand &value, Label *label);
+ void branchTestDouble(Condition cond, const Register &tag, Label *label);
+ void branchTestDouble(Condition cond, const Address &address, Label *label);
+ void branchTestDouble(Condition cond, const BaseIndex &src, Label *label);
+
+ void branchTestNull(Condition cond, const ValueOperand &value, Label *label);
+ void branchTestNull(Condition cond, const Register &tag, Label *label);
+ void branchTestNull(Condition cond, const BaseIndex &src, Label *label);
+
+ void branchTestObject(Condition cond, const ValueOperand &value, Label *label);
+ void branchTestObject(Condition cond, const Register &tag, Label *label);
+ void branchTestObject(Condition cond, const BaseIndex &src, Label *label);
+
+ void branchTestString(Condition cond, const ValueOperand &value, Label *label);
+ void branchTestString(Condition cond, const Register &tag, Label *label);
+ void branchTestString(Condition cond, const BaseIndex &src, Label *label);
+
+ void branchTestUndefined(Condition cond, const ValueOperand &value, Label *label);
+ void branchTestUndefined(Condition cond, const Register &tag, Label *label);
+ void branchTestUndefined(Condition cond, const BaseIndex &src, Label *label);
+ void branchTestUndefined(Condition cond, const Address &address, Label *label);
+
+ void branchTestNumber(Condition cond, const ValueOperand &value, Label *label);
+ void branchTestNumber(Condition cond, const Register &tag, Label *label);
+
+ void branchTestMagic(Condition cond, const ValueOperand &value, Label *label);
+ void branchTestMagic(Condition cond, const Register &tag, Label *label);
+ void branchTestMagic(Condition cond, const Address &address, Label *label);
+ void branchTestMagic(Condition cond, const BaseIndex &src, Label *label);
+
+ void branchTestMagicValue(Condition cond, const ValueOperand &val, JSWhyMagic why,
+ Label *label) {
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ // Test for magic
+ Label notmagic;
+ branchTestMagic(cond, val, ¬magic);
+ // Test magic value
+ branch32(cond, val.payloadReg(), Imm32(static_cast<int32_t>(why)), label);
+ bind(¬magic);
+ }
+
+ void branchTestInt32Truthy(bool b, const ValueOperand &value, Label *label);
+
+ void branchTestStringTruthy(bool b, const ValueOperand &value, Label *label);
+
+ void branchTestDoubleTruthy(bool b, const FloatRegister &value, Label *label);
+
+ void branchTestBooleanTruthy(bool b, const ValueOperand &operand, Label *label);
+
+ void branchTest32(Condition cond, const Register &lhs, const Register &rhs, Label *label) {
+ MOZ_ASSERT(cond == Zero || cond == NonZero || cond == Signed || cond == NotSigned);
+ if (lhs == rhs) {
+ ma_b(lhs, rhs, label, cond);
+ } else {
+ as_and(ScratchRegister, lhs, rhs);
+ ma_b(ScratchRegister, ScratchRegister, label, cond);
+ }
+ }
+ void branchTest32(Condition cond, const Register &lhs, Imm32 imm, Label *label) {
+ ma_li(ScratchRegister, imm);
+ branchTest32(cond, lhs, ScratchRegister, label);
+ }
+ void branchTest32(Condition cond, const Address &address, Imm32 imm, Label *label) {
+ ma_lw(SecondScratchReg, address);
+ branchTest32(cond, SecondScratchReg, imm, label);
+ }
+ void branchTestPtr(Condition cond, const Register &lhs, const Register &rhs, Label *label) {
+ branchTest32(cond, lhs, rhs, label);
+ }
+ void branchTestPtr(Condition cond, const Register &lhs, const Imm32 rhs, Label *label) {
+ branchTest32(cond, lhs, rhs, label);
+ }
+ void branchTestPtr(Condition cond, const Address &lhs, Imm32 imm, Label *label) {
+ branchTest32(cond, lhs, imm, label);
+ }
+ void branchPtr(Condition cond, Register lhs, Register rhs, Label *label) {
+ ma_b(lhs, rhs, label, cond);
+ }
+ void branchPtr(Condition cond, Register lhs, ImmGCPtr ptr, Label *label) {
+ ma_li(ScratchRegister, ptr);
+ ma_b(lhs, ScratchRegister, label, cond);
+ }
+ void branchPtr(Condition cond, Register lhs, ImmWord imm, Label *label) {
+ ma_b(lhs, Imm32(imm.value), label, cond);
+ }
+ void branchPtr(Condition cond, Register lhs, ImmPtr imm, Label *label) {
+ branchPtr(cond, lhs, ImmWord(uintptr_t(imm.value)), label);
+ }
+ void branchPtr(Condition cond, Register lhs, AsmJSImmPtr imm, Label *label) {
+ movePtr(imm, ScratchRegister);
+ branchPtr(cond, lhs, ScratchRegister, label);
+ }
+ void branchPtr(Condition cond, Register lhs, Imm32 imm, Label *label) {
+ ma_b(lhs, imm, label, cond);
+ }
+ void decBranchPtr(Condition cond, const Register &lhs, Imm32 imm, Label *label) {
+ subPtr(imm, lhs);
+ branch32(cond, lhs, Imm32(0), label);
+ }
+
+protected:
+ uint32_t getType(const Value &val);
+ void moveData(const Value &val, Register data);
+public:
+ void moveValue(const Value &val, Register type, Register data);
+
+ CodeOffsetJump jumpWithPatch(RepatchLabel *label);
+
+ template <typename T>
+ CodeOffsetJump branchPtrWithPatch(Condition cond, Register reg, T ptr, RepatchLabel *label) {
+ movePtr(ptr, ScratchRegister);
+ Label skipJump;
+ ma_b(reg, ScratchRegister, &skipJump, InvertCondition(cond), ShortJump);
+ CodeOffsetJump off = jumpWithPatch(label);
+ bind(&skipJump);
+ return off;
+ }
+
+ template <typename T>
+ CodeOffsetJump branchPtrWithPatch(Condition cond, Address addr, T ptr, RepatchLabel *label) {
+ loadPtr(addr, SecondScratchReg);
+ movePtr(ptr, ScratchRegister);
+ Label skipJump;
+ ma_b(SecondScratchReg, ScratchRegister, &skipJump, InvertCondition(cond), ShortJump);
+ CodeOffsetJump off = jumpWithPatch(label);
+ bind(&skipJump);
+ return off;
+ }
+ void branchPtr(Condition cond, Address addr, ImmGCPtr ptr, Label *label) {
+ ma_lw(SecondScratchReg, addr);
+ ma_li(ScratchRegister, ptr);
+ ma_b(SecondScratchReg, ScratchRegister, label, cond);
+ }
+ void branchPtr(Condition cond, Address addr, ImmWord ptr, Label *label) {
+ ma_lw(SecondScratchReg, addr);
+ ma_b(SecondScratchReg, Imm32(ptr.value), label, cond);
+ }
+ void branchPtr(Condition cond, Address addr, ImmPtr ptr, Label *label) {
+ branchPtr(cond, addr, ImmWord(uintptr_t(ptr.value)), label);
+ }
+ void branchPtr(Condition cond, const AbsoluteAddress &addr, const Register &ptr, Label *label) {
+ loadPtr(addr, ScratchRegister);
+ ma_b(ScratchRegister, ptr, label, cond);
+ }
+ void branchPtr(Condition cond, const AsmJSAbsoluteAddress &addr, const Register &ptr,
+ Label *label) {
+ loadPtr(addr, ScratchRegister);
+ ma_b(ScratchRegister, ptr, label, cond);
+ }
+ void branch32(Condition cond, const AbsoluteAddress &lhs, Imm32 rhs, Label *label) {
+ loadPtr(lhs, SecondScratchReg); // ma_b might use scratch
+ ma_b(SecondScratchReg, rhs, label, cond);
+ }
+ void branch32(Condition cond, const AbsoluteAddress &lhs, const Register &rhs, Label *label) {
+ loadPtr(lhs, ScratchRegister);
+ ma_b(ScratchRegister, rhs, label, cond);
+ }
+
+ void loadUnboxedValue(Address address, MIRType type, AnyRegister dest) {
+ if (dest.isFloat())
+ loadInt32OrDouble(address, dest.fpu());
+ else
+ ma_lw(dest.gpr(), address);
+ }
+
+ void loadUnboxedValue(BaseIndex address, MIRType type, AnyRegister dest) {
+ if (dest.isFloat())
+ loadInt32OrDouble(address.base, address.index, dest.fpu(), address.scale);
+ else
+ load32(address, dest.gpr());
+ }
+
+ void moveValue(const Value &val, const ValueOperand &dest);
+
+ void moveValue(const ValueOperand &src, const ValueOperand &dest) {
+ MOZ_ASSERT(src.typeReg() != dest.payloadReg());
+ MOZ_ASSERT(src.payloadReg() != dest.typeReg());
+ if (src.typeReg() != dest.typeReg())
+ ma_move(dest.typeReg(), src.typeReg());
+ if (src.payloadReg() != dest.payloadReg())
+ ma_move(dest.payloadReg(), src.payloadReg());
+ }
+
+ void storeValue(ValueOperand val, Operand dst);
+ void storeValue(ValueOperand val, const BaseIndex &dest);
+ void storeValue(JSValueType type, Register reg, BaseIndex dest);
+ void storeValue(ValueOperand val, const Address &dest);
+ void storeValue(JSValueType type, Register reg, Address dest);
+ void storeValue(const Value &val, Address dest);
+ void storeValue(const Value &val, BaseIndex dest);
+
+ void loadValue(Address src, ValueOperand val);
+ void loadValue(Operand dest, ValueOperand val) {
+ loadValue(dest.toAddress(), val);
+ }
+ void loadValue(const BaseIndex &addr, ValueOperand val);
+ void tagValue(JSValueType type, Register payload, ValueOperand dest);
+
+ void pushValue(ValueOperand val);
+ void popValue(ValueOperand val);
+ void pushValue(const Value &val) {
+ jsval_layout jv = JSVAL_TO_IMPL(val);
+ push(Imm32(jv.s.tag));
+ if (val.isMarkable())
+ push(ImmGCPtr(reinterpret_cast<gc::Cell *>(val.toGCThing())));
+ else
+ push(Imm32(jv.s.payload.i32));
+ }
+ void pushValue(JSValueType type, Register reg) {
+ push(ImmTag(JSVAL_TYPE_TO_TAG(type)));
+ ma_push(reg);
+ }
+ void pushValue(const Address &addr);
+ void Push(const ValueOperand &val) {
+ pushValue(val);
+ framePushed_ += sizeof(Value);
+ }
+ void Pop(const ValueOperand &val) {
+ popValue(val);
+ framePushed_ -= sizeof(Value);
+ }
+ void storePayload(const Value &val, Address dest);
+ void storePayload(Register src, Address dest);
+ void storePayload(const Value &val, Register base, Register index, int32_t shift = defaultShift);
+ void storePayload(Register src, Register base, Register index, int32_t shift = defaultShift);
+ void storeTypeTag(ImmTag tag, Address dest);
+ void storeTypeTag(ImmTag tag, Register base, Register index, int32_t shift = defaultShift);
+
+ void makeFrameDescriptor(Register frameSizeReg, FrameType type) {
+ ma_sll(frameSizeReg, frameSizeReg, Imm32(FRAMESIZE_SHIFT));
+ ma_or(frameSizeReg, frameSizeReg, Imm32(type));
+ }
+
+ void linkExitFrame();
+ void linkParallelExitFrame(const Register &pt);
+ void handleFailureWithHandler(void *handler);
+ void handleFailureWithHandlerTail();
+
+ /////////////////////////////////////////////////////////////////
+ // Common interface.
+ /////////////////////////////////////////////////////////////////
+ public:
+ // The following functions are exposed for use in platform-shared code.
+ void Push(const Register ®) {
+ ma_push(reg);
+ adjustFrame(sizeof(intptr_t));
+ }
+ void Push(const Imm32 imm) {
+ ma_li(ScratchRegister, imm);
+ ma_push(ScratchRegister);
+ adjustFrame(sizeof(intptr_t));
+ }
+ void Push(const ImmWord imm) {
+ ma_li(ScratchRegister, Imm32(imm.value));
+ ma_push(ScratchRegister);
+ adjustFrame(sizeof(intptr_t));
+ }
+ void Push(const ImmPtr imm) {
+ Push(ImmWord(uintptr_t(imm.value)));
+ }
+ void Push(const ImmGCPtr ptr) {
+ ma_li(ScratchRegister, ptr);
+ ma_push(ScratchRegister);
+ adjustFrame(sizeof(intptr_t));
+ }
+ void Push(const FloatRegister &f) {
+ ma_push(f);
+ adjustFrame(sizeof(double));
+ }
+
+ CodeOffsetLabel PushWithPatch(const ImmWord &word) {
+ framePushed_ += sizeof(word.value);
+ return pushWithPatch(word);
+ }
+ CodeOffsetLabel PushWithPatch(const ImmPtr &imm) {
+ return PushWithPatch(ImmWord(uintptr_t(imm.value)));
+ }
+
+ void Pop(const Register ®) {
+ ma_pop(reg);
+ adjustFrame(-sizeof(intptr_t));
+ }
+ void implicitPop(uint32_t args) {
+ MOZ_ASSERT(args % sizeof(intptr_t) == 0);
+ adjustFrame(-args);
+ }
+ uint32_t framePushed() const {
+ return framePushed_;
+ }
+ void setFramePushed(uint32_t framePushed) {
+ framePushed_ = framePushed;
+ }
+
+ // Builds an exit frame on the stack, with a return address to an internal
+ // non-function. Returns offset to be passed to markSafepointAt().
+ bool buildFakeExitFrame(const Register &scratch, uint32_t *offset);
+
+ void callWithExitFrame(JitCode *target);
+ void callWithExitFrame(JitCode *target, Register dynStack);
+
+    // Makes an Ion call using the only two methods that it is sane for
+    // platform-independent code to use when making a call
+ void callIon(const Register &callee);
+
+ void reserveStack(uint32_t amount);
+ void freeStack(uint32_t amount);
+ void freeStack(Register amount);
+
+ void add32(Register src, Register dest);
+ void add32(Imm32 imm, Register dest);
+ void add32(Imm32 imm, const Address &dest);
+ void sub32(Imm32 imm, Register dest);
+ void sub32(Register src, Register dest);
+
+ void and32(Imm32 imm, Register dest);
+ void and32(Imm32 imm, const Address &dest);
+ void or32(Imm32 imm, const Address &dest);
+ void xor32(Imm32 imm, Register dest);
+ void xorPtr(Imm32 imm, Register dest);
+ void xorPtr(Register src, Register dest);
+ void orPtr(Imm32 imm, Register dest);
+ void orPtr(Register src, Register dest);
+ void andPtr(Imm32 imm, Register dest);
+ void andPtr(Register src, Register dest);
+ void addPtr(Register src, Register dest);
+ void subPtr(Register src, Register dest);
+ void addPtr(const Address &src, Register dest);
+ void not32(Register reg);
+
+ void move32(const Imm32 &imm, const Register &dest);
+ void move32(const Register &src, const Register &dest);
+
+ void movePtr(const Register &src, const Register &dest);
+ void movePtr(const ImmWord &imm, const Register &dest);
+ void movePtr(const ImmPtr &imm, const Register &dest);
+ void movePtr(const AsmJSImmPtr &imm, const Register &dest);
+ void movePtr(const ImmGCPtr &imm, const Register &dest);
+
+ void load8SignExtend(const Address &address, const Register &dest);
+ void load8SignExtend(const BaseIndex &src, const Register &dest);
+
+ void load8ZeroExtend(const Address &address, const Register &dest);
+ void load8ZeroExtend(const BaseIndex &src, const Register &dest);
+
+ void load16SignExtend(const Address &address, const Register &dest);
+ void load16SignExtend(const BaseIndex &src, const Register &dest);
+
+ void load16ZeroExtend(const Address &address, const Register &dest);
+ void load16ZeroExtend(const BaseIndex &src, const Register &dest);
+
+ void load32(const Address &address, const Register &dest);
+ void load32(const BaseIndex &address, const Register &dest);
+ void load32(const AbsoluteAddress &address, const Register &dest);
+
+ void loadPtr(const Address &address, const Register &dest);
+ void loadPtr(const BaseIndex &src, const Register &dest);
+ void loadPtr(const AbsoluteAddress &address, const Register &dest);
+ void loadPtr(const AsmJSAbsoluteAddress &address, const Register &dest);
+
+ void loadPrivate(const Address &address, const Register &dest);
+
+ void loadDouble(const Address &addr, const FloatRegister &dest);
+ void loadDouble(const BaseIndex &src, const FloatRegister &dest);
+
+ // Load a float value into a register, then expand it to a double.
+ void loadFloatAsDouble(const Address &addr, const FloatRegister &dest);
+ void loadFloatAsDouble(const BaseIndex &src, const FloatRegister &dest);
+
+ void loadFloat32(const Address &addr, const FloatRegister &dest);
+ void loadFloat32(const BaseIndex &src, const FloatRegister &dest);
+
+ void store8(const Register &src, const Address &address);
+ void store8(const Imm32 &imm, const Address &address);
+ void store8(const Register &src, const BaseIndex &address);
+ void store8(const Imm32 &imm, const BaseIndex &address);
+
+ void store16(const Register &src, const Address &address);
+ void store16(const Imm32 &imm, const Address &address);
+ void store16(const Register &src, const BaseIndex &address);
+ void store16(const Imm32 &imm, const BaseIndex &address);
+
+ void store32(const Register &src, const AbsoluteAddress &address);
+ void store32(const Register &src, const Address &address);
+ void store32(const Register &src, const BaseIndex &address);
+ void store32(const Imm32 &src, const Address &address);
+ void store32(const Imm32 &src, const BaseIndex &address);
+
+ void storePtr(ImmWord imm, const Address &address);
+ void storePtr(ImmPtr imm, const Address &address);
+ void storePtr(ImmGCPtr imm, const Address &address);
+ void storePtr(Register src, const Address &address);
+ void storePtr(const Register &src, const AbsoluteAddress &dest);
+ void storeDouble(FloatRegister src, Address addr) {
+ ma_sd(src, addr);
+ }
+ void storeDouble(FloatRegister src, BaseIndex addr) {
+ MOZ_ASSERT(addr.offset == 0);
+ ma_sd(src, addr);
+ }
+ void moveDouble(FloatRegister src, FloatRegister dest) {
+ as_movd(dest, src);
+ }
+
+ void storeFloat32(FloatRegister src, Address addr) {
+ ma_ss(src, addr);
+ }
+ void storeFloat32(FloatRegister src, BaseIndex addr) {
+ MOZ_ASSERT(addr.offset == 0);
+ ma_ss(src, addr);
+ }
+
+ void zeroDouble(FloatRegister reg) {
+ moveToDoubleLo(zero, reg);
+ moveToDoubleHi(zero, reg);
+ }
+
+ void clampIntToUint8(Register reg) {
+        // Look at (reg >> 8): if it is 0, then src shouldn't be clamped;
+        // if it is < 0, then we want to clamp to 0;
+        // otherwise, we wish to clamp to 255.
+ Label done;
+ ma_move(ScratchRegister, reg);
+ as_sra(ScratchRegister, ScratchRegister, 8);
+ ma_b(ScratchRegister, ScratchRegister, &done, Assembler::Zero, ShortJump);
+ {
+ Label negative;
+ ma_b(ScratchRegister, ScratchRegister, &negative, Assembler::Signed, ShortJump);
+ {
+ ma_li(reg, Imm32(255));
+ ma_b(&done, ShortJump);
+ }
+ bind(&negative);
+ {
+ ma_move(reg, zero);
+ }
+ }
+ bind(&done);
+ }
+
+ void subPtr(Imm32 imm, const Register dest);
+ void addPtr(Imm32 imm, const Register dest);
+ void addPtr(Imm32 imm, const Address &dest);
+ void addPtr(ImmWord imm, const Register dest) {
+ addPtr(Imm32(imm.value), dest);
+ }
+ void addPtr(ImmPtr imm, const Register dest) {
+ addPtr(ImmWord(uintptr_t(imm.value)), dest);
+ }
+
+ void breakpoint();
+
+ void branchDouble(DoubleCondition cond, const FloatRegister &lhs, const FloatRegister &rhs,
+ Label *label);
+
+ void branchFloat(DoubleCondition cond, const FloatRegister &lhs, const FloatRegister &rhs,
+ Label *label);
+
+ void checkStackAlignment();
+
+ void alignPointerUp(Register src, Register dest, uint32_t alignment);
+
+ void rshiftPtr(Imm32 imm, Register dest) {
+ ma_srl(dest, dest, imm);
+ }
+ void lshiftPtr(Imm32 imm, Register dest) {
+ ma_sll(dest, dest, imm);
+ }
+
+ // If source is a double, load it into dest. If source is int32,
+ // convert it to double. Else, branch to failure.
+ void ensureDouble(const ValueOperand &source, FloatRegister dest, Label *failure);
+
+ // Setup a call to C/C++ code, given the number of general arguments it
+ // takes. Note that this only supports cdecl.
+ //
+ // In order for alignment to work correctly, the MacroAssembler must have a
+ // consistent view of the stack displacement. It is okay to call "push"
+ // manually, however, if the stack alignment were to change, the macro
+ // assembler should be notified before starting a call.
+ void setupAlignedABICall(uint32_t args);
+
+ // Sets up an ABI call for when the alignment is not known. This may need a
+ // scratch register.
+ void setupUnalignedABICall(uint32_t args, const Register &scratch);
+
+ // Arguments must be assigned in a left-to-right order. This process may
+ // temporarily use more stack, in which case sp-relative addresses will be
+ // automatically adjusted. It is extremely important that sp-relative
+ // addresses are computed *after* setupABICall(). Furthermore, no
+ // operations should be emitted while setting arguments.
+ void passABIArg(const MoveOperand &from, MoveOp::Type type);
+ void passABIArg(const Register ®);
+ void passABIArg(const FloatRegister ®, MoveOp::Type type);
+ void passABIArg(const ValueOperand ®s);
+
+ protected:
+ bool buildOOLFakeExitFrame(void *fakeReturnAddr);
+
+ private:
+ void callWithABIPre(uint32_t *stackAdjust);
+ void callWithABIPost(uint32_t stackAdjust, MoveOp::Type result);
+
+ public:
+ // Emits a call to a C/C++ function, resolving all argument moves.
+ void callWithABI(void *fun, MoveOp::Type result = MoveOp::GENERAL);
+ void callWithABI(AsmJSImmPtr imm, MoveOp::Type result = MoveOp::GENERAL);
+ void callWithABI(const Address &fun, MoveOp::Type result = MoveOp::GENERAL);
+
+ CodeOffsetLabel labelForPatch() {
+ return CodeOffsetLabel(nextOffset().getOffset());
+ }
+
+ void memIntToValue(Address Source, Address Dest) {
+ MOZ_ASSUME_UNREACHABLE("NYI");
+ }
+
+ void lea(Operand addr, Register dest) {
+ MOZ_ASSUME_UNREACHABLE("NYI");
+ }
+
+ void abiret() {
+ MOZ_ASSUME_UNREACHABLE("NYI");
+ }
+
+ void ma_storeImm(Imm32 imm, const Address &addr) {
+ ma_sw(imm, addr);
+ }
+
+ BufferOffset ma_BoundsCheck(Register bounded) {
+ BufferOffset bo = m_buffer.nextOffset();
+ ma_liPatchable(bounded, Imm32(0));
+ return bo;
+ }
+
+ void moveFloat32(FloatRegister src, FloatRegister dest) {
+ as_movs(dest, src);
+ }
+
+ void branchPtrInNurseryRange(Register ptr, Register temp, Label *label);
+};
+
+typedef MacroAssemblerMIPSCompat MacroAssemblerSpecific;
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_mips_MacroAssembler_mips_h */
diff --git a/src/third_party/mozjs/js/src/jit/mips/MoveEmitter-mips.cpp b/src/third_party/mozjs/js/src/jit/mips/MoveEmitter-mips.cpp
new file mode 100644
index 0000000..12864b0
--- /dev/null
+++ b/src/third_party/mozjs/js/src/jit/mips/MoveEmitter-mips.cpp
@@ -0,0 +1,330 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/mips/MoveEmitter-mips.h"
+
+using namespace js;
+using namespace js::jit;
+
+MoveEmitterMIPS::MoveEmitterMIPS(MacroAssemblerMIPSCompat &masm)
+ : inCycle_(false),
+ masm(masm),
+ pushedAtCycle_(-1),
+ pushedAtSpill_(-1),
+ spilledReg_(InvalidReg),
+ spilledFloatReg_(InvalidFloatReg)
+{
+ pushedAtStart_ = masm.framePushed();
+}
+
+void
+MoveEmitterMIPS::emit(const MoveResolver &moves)
+{
+ if (moves.hasCycles()) {
+ // Reserve stack for cycle resolution
+ masm.reserveStack(sizeof(double));
+ pushedAtCycle_ = masm.framePushed();
+ }
+
+ for (size_t i = 0; i < moves.numMoves(); i++)
+ emit(moves.getMove(i));
+}
+
+MoveEmitterMIPS::~MoveEmitterMIPS()
+{
+ assertDone();
+}
+
+Address
+MoveEmitterMIPS::cycleSlot() const
+{
+ int offset = masm.framePushed() - pushedAtCycle_;
+ MOZ_ASSERT(Imm16::isInSignedRange(offset));
+ return Address(StackPointer, offset);
+}
+
+int32_t
+MoveEmitterMIPS::getAdjustedOffset(const MoveOperand &operand)
+{
+ MOZ_ASSERT(operand.isMemoryOrEffectiveAddress());
+ if (operand.base() != StackPointer)
+ return operand.disp();
+
+ // Adjust offset if stack pointer has been moved.
+ return operand.disp() + masm.framePushed() - pushedAtStart_;
+}
+
+Address
+MoveEmitterMIPS::getAdjustedAddress(const MoveOperand &operand)
+{
+ return Address(operand.base(), getAdjustedOffset(operand));
+}
+
+
+Register
+MoveEmitterMIPS::tempReg()
+{
+ spilledReg_ = SecondScratchReg;
+ return SecondScratchReg;
+}
+
+void
+MoveEmitterMIPS::breakCycle(const MoveOperand &from, const MoveOperand &to, MoveOp::Type type)
+{
+    // This handles the cycle pattern:
+    //   (A -> B)
+    //   (B -> A)
+ //
+ // This case handles (A -> B), which we reach first. We save B, then allow
+ // the original move to continue.
+ switch (type) {
+ case MoveOp::FLOAT32:
+ if (to.isMemory()) {
+ FloatRegister temp = ScratchFloatReg;
+ masm.loadFloat32(getAdjustedAddress(to), temp);
+ masm.storeFloat32(temp, cycleSlot());
+ } else {
+ masm.storeFloat32(to.floatReg(), cycleSlot());
+ }
+ break;
+ case MoveOp::DOUBLE:
+ if (to.isMemory()) {
+ FloatRegister temp = ScratchFloatReg;
+ masm.loadDouble(getAdjustedAddress(to), temp);
+ masm.storeDouble(temp, cycleSlot());
+ } else {
+ masm.storeDouble(to.floatReg(), cycleSlot());
+ }
+ break;
+ case MoveOp::INT32:
+ MOZ_ASSERT(sizeof(uintptr_t) == sizeof(int32_t));
+ case MoveOp::GENERAL:
+ if (to.isMemory()) {
+ Register temp = tempReg();
+ masm.loadPtr(getAdjustedAddress(to), temp);
+ masm.storePtr(temp, cycleSlot());
+ } else {
+ // Second scratch register should not be moved by MoveEmitter.
+ MOZ_ASSERT(to.reg() != spilledReg_);
+ masm.storePtr(to.reg(), cycleSlot());
+ }
+ break;
+ default:
+ MOZ_ASSUME_UNREACHABLE("Unexpected move type");
+ }
+}
+
+void
+MoveEmitterMIPS::completeCycle(const MoveOperand &from, const MoveOperand &to, MoveOp::Type type)
+{
+    // This handles the cycle pattern:
+    //   (A -> B)
+    //   (B -> A)
+ //
+ // This case handles (B -> A), which we reach last. We emit a move from the
+ // saved value of B, to A.
+ switch (type) {
+ case MoveOp::FLOAT32:
+ if (to.isMemory()) {
+ FloatRegister temp = ScratchFloatReg;
+ masm.loadFloat32(cycleSlot(), temp);
+ masm.storeFloat32(temp, getAdjustedAddress(to));
+ } else {
+ masm.loadFloat32(cycleSlot(), to.floatReg());
+ }
+ break;
+ case MoveOp::DOUBLE:
+ if (to.isMemory()) {
+ FloatRegister temp = ScratchFloatReg;
+ masm.loadDouble(cycleSlot(), temp);
+ masm.storeDouble(temp, getAdjustedAddress(to));
+ } else {
+ masm.loadDouble(cycleSlot(), to.floatReg());
+ }
+ break;
+ case MoveOp::INT32:
+ MOZ_ASSERT(sizeof(uintptr_t) == sizeof(int32_t));
+ case MoveOp::GENERAL:
+ if (to.isMemory()) {
+ Register temp = tempReg();
+ masm.loadPtr(cycleSlot(), temp);
+ masm.storePtr(temp, getAdjustedAddress(to));
+ } else {
+ // Second scratch register should not be moved by MoveEmitter.
+ MOZ_ASSERT(to.reg() != spilledReg_);
+ masm.loadPtr(cycleSlot(), to.reg());
+ }
+ break;
+ default:
+ MOZ_ASSUME_UNREACHABLE("Unexpected move type");
+ }
+}
+
+void
+MoveEmitterMIPS::emitMove(const MoveOperand &from, const MoveOperand &to)
+{
+ if (from.isGeneralReg()) {
+ // Second scratch register should not be moved by MoveEmitter.
+ MOZ_ASSERT(from.reg() != spilledReg_);
+
+ if (to.isGeneralReg())
+ masm.movePtr(from.reg(), to.reg());
+ else if (to.isMemory())
+ masm.storePtr(from.reg(), getAdjustedAddress(to));
+ else
+ MOZ_ASSUME_UNREACHABLE("Invalid emitMove arguments.");
+ } else if (from.isMemory()) {
+ if (to.isGeneralReg()) {
+ masm.loadPtr(getAdjustedAddress(from), to.reg());
+ } else if (to.isMemory()) {
+ masm.loadPtr(getAdjustedAddress(from), tempReg());
+ masm.storePtr(tempReg(), getAdjustedAddress(to));
+ } else {
+ MOZ_ASSUME_UNREACHABLE("Invalid emitMove arguments.");
+ }
+ } else if (from.isEffectiveAddress()) {
+ if (to.isGeneralReg()) {
+ masm.computeEffectiveAddress(getAdjustedAddress(from), to.reg());
+ } else if (to.isMemory()) {
+ masm.computeEffectiveAddress(getAdjustedAddress(from), tempReg());
+ masm.storePtr(tempReg(), getAdjustedAddress(to));
+ } else {
+ MOZ_ASSUME_UNREACHABLE("Invalid emitMove arguments.");
+ }
+ } else {
+ MOZ_ASSUME_UNREACHABLE("Invalid emitMove arguments.");
+ }
+}
+
+void
+MoveEmitterMIPS::emitFloat32Move(const MoveOperand &from, const MoveOperand &to)
+{
+ // Ensure that we can use ScratchFloatReg in memory move.
+ MOZ_ASSERT_IF(from.isFloatReg(), from.floatReg() != ScratchFloatReg);
+ MOZ_ASSERT_IF(to.isFloatReg(), to.floatReg() != ScratchFloatReg);
+
+ if (from.isFloatReg()) {
+ if (to.isFloatReg()) {
+ masm.moveFloat32(from.floatReg(), to.floatReg());
+ } else if (to.isGeneralReg()) {
+ // This should only be used when passing float parameter in a1,a2,a3
+ MOZ_ASSERT(to.reg() == a1 || to.reg() == a2 || to.reg() == a3);
+ masm.moveFromFloat32(from.floatReg(), to.reg());
+ } else {
+ MOZ_ASSERT(to.isMemory());
+ masm.storeFloat32(from.floatReg(), getAdjustedAddress(to));
+ }
+ } else if (to.isFloatReg()) {
+ MOZ_ASSERT(from.isMemory());
+ masm.loadFloat32(getAdjustedAddress(from), to.floatReg());
+ } else if (to.isGeneralReg()) {
+ MOZ_ASSERT(from.isMemory());
+ // This should only be used when passing float parameter in a1,a2,a3
+ MOZ_ASSERT(to.reg() == a1 || to.reg() == a2 || to.reg() == a3);
+ masm.loadPtr(getAdjustedAddress(from), to.reg());
+ } else {
+ MOZ_ASSERT(from.isMemory());
+ MOZ_ASSERT(to.isMemory());
+ masm.loadFloat32(getAdjustedAddress(from), ScratchFloatReg);
+ masm.storeFloat32(ScratchFloatReg, getAdjustedAddress(to));
+ }
+}
+
+void
+MoveEmitterMIPS::emitDoubleMove(const MoveOperand &from, const MoveOperand &to)
+{
+ // Ensure that we can use ScratchFloatReg in memory move.
+ MOZ_ASSERT_IF(from.isFloatReg(), from.floatReg() != ScratchFloatReg);
+ MOZ_ASSERT_IF(to.isFloatReg(), to.floatReg() != ScratchFloatReg);
+
+ if (from.isFloatReg()) {
+ if (to.isFloatReg()) {
+ masm.moveDouble(from.floatReg(), to.floatReg());
+ } else if (to.isGeneralReg()) {
+ // Used for passing double parameter in a2,a3 register pair.
+ // Two moves are added for one double parameter by
+ // MacroAssemblerMIPSCompat::passABIArg
+ if(to.reg() == a2)
+ masm.moveFromDoubleLo(from.floatReg(), a2);
+ else if(to.reg() == a3)
+ masm.moveFromDoubleHi(from.floatReg(), a3);
+ else
+ MOZ_ASSUME_UNREACHABLE("Invalid emitDoubleMove arguments.");
+ } else {
+ MOZ_ASSERT(to.isMemory());
+ masm.storeDouble(from.floatReg(), getAdjustedAddress(to));
+ }
+ } else if (to.isFloatReg()) {
+ MOZ_ASSERT(from.isMemory());
+ masm.loadDouble(getAdjustedAddress(from), to.floatReg());
+ } else if (to.isGeneralReg()) {
+ MOZ_ASSERT(from.isMemory());
+ // Used for passing double parameter in a2,a3 register pair.
+ // Two moves are added for one double parameter by
+ // MacroAssemblerMIPSCompat::passABIArg
+ if(to.reg() == a2)
+ masm.loadPtr(getAdjustedAddress(from), a2);
+ else if(to.reg() == a3)
+ masm.loadPtr(Address(from.base(), getAdjustedOffset(from) + sizeof(uint32_t)), a3);
+ else
+ MOZ_ASSUME_UNREACHABLE("Invalid emitDoubleMove arguments.");
+ } else {
+ MOZ_ASSERT(from.isMemory());
+ MOZ_ASSERT(to.isMemory());
+ masm.loadDouble(getAdjustedAddress(from), ScratchFloatReg);
+ masm.storeDouble(ScratchFloatReg, getAdjustedAddress(to));
+ }
+}
+
+void
+MoveEmitterMIPS::emit(const MoveOp &move)
+{
+ const MoveOperand &from = move.from();
+ const MoveOperand &to = move.to();
+
+ if (move.isCycleEnd()) {
+ MOZ_ASSERT(inCycle_);
+ completeCycle(from, to, move.type());
+ inCycle_ = false;
+ return;
+ }
+
+ if (move.isCycleBegin()) {
+ MOZ_ASSERT(!inCycle_);
+ breakCycle(from, to, move.endCycleType());
+ inCycle_ = true;
+ }
+
+ switch (move.type()) {
+ case MoveOp::FLOAT32:
+ emitFloat32Move(from, to);
+ break;
+ case MoveOp::DOUBLE:
+ emitDoubleMove(from, to);
+ break;
+ case MoveOp::INT32:
+ MOZ_ASSERT(sizeof(uintptr_t) == sizeof(int32_t));
+ case MoveOp::GENERAL:
+ emitMove(from, to);
+ break;
+ default:
+ MOZ_ASSUME_UNREACHABLE("Unexpected move type");
+ }
+}
+
+void
+MoveEmitterMIPS::assertDone()
+{
+ MOZ_ASSERT(!inCycle_);
+}
+
+void
+MoveEmitterMIPS::finish()
+{
+ assertDone();
+
+ masm.freeStack(masm.framePushed() - pushedAtStart_);
+}
diff --git a/src/third_party/mozjs/js/src/jit/mips/MoveEmitter-mips.h b/src/third_party/mozjs/js/src/jit/mips/MoveEmitter-mips.h
new file mode 100644
index 0000000..d17820a
--- /dev/null
+++ b/src/third_party/mozjs/js/src/jit/mips/MoveEmitter-mips.h
@@ -0,0 +1,64 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_mips_MoveEmitter_mips_h
+#define jit_mips_MoveEmitter_mips_h
+
+#include "jit/IonMacroAssembler.h"
+#include "jit/MoveResolver.h"
+
+namespace js {
+namespace jit {
+
+class CodeGenerator;
+
+class MoveEmitterMIPS
+{
+ bool inCycle_;
+ MacroAssemblerMIPSCompat &masm;
+
+ // Original stack push value.
+ uint32_t pushedAtStart_;
+
+ // These store stack offsets to spill locations, snapshotting
+ // codegen->framePushed_ at the time they were allocated. They are -1 if no
+ // stack space has been allocated for that particular spill.
+ int32_t pushedAtCycle_;
+ int32_t pushedAtSpill_;
+
+ // These are registers that are available for temporary use. They may be
+ // assigned InvalidReg. If no corresponding spill space has been assigned,
+ // then these registers do not need to be spilled.
+ Register spilledReg_;
+ FloatRegister spilledFloatReg_;
+
+ void assertDone();
+ Register tempReg();
+ FloatRegister tempFloatReg();
+ Address cycleSlot() const;
+ int32_t getAdjustedOffset(const MoveOperand &operand);
+ Address getAdjustedAddress(const MoveOperand &operand);
+
+ void emitMove(const MoveOperand &from, const MoveOperand &to);
+ void emitFloat32Move(const MoveOperand &from, const MoveOperand &to);
+ void emitDoubleMove(const MoveOperand &from, const MoveOperand &to);
+ void breakCycle(const MoveOperand &from, const MoveOperand &to, MoveOp::Type type);
+ void completeCycle(const MoveOperand &from, const MoveOperand &to, MoveOp::Type type);
+ void emit(const MoveOp &move);
+
+ public:
+ MoveEmitterMIPS(MacroAssemblerMIPSCompat &masm);
+ ~MoveEmitterMIPS();
+ void emit(const MoveResolver &moves);
+ void finish();
+};
+
+typedef MoveEmitterMIPS MoveEmitter;
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_mips_MoveEmitter_mips_h */
diff --git a/src/third_party/mozjs/js/src/jit/mips/Trampoline-mips.cpp b/src/third_party/mozjs/js/src/jit/mips/Trampoline-mips.cpp
new file mode 100644
index 0000000..b456845
--- /dev/null
+++ b/src/third_party/mozjs/js/src/jit/mips/Trampoline-mips.cpp
@@ -0,0 +1,997 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jscompartment.h"
+
+#include "jit/Bailouts.h"
+#include "jit/IonFrames.h"
+#include "jit/IonLinker.h"
+#include "jit/IonSpewer.h"
+#include "jit/JitCompartment.h"
+#include "jit/mips/Bailouts-mips.h"
+#include "jit/mips/BaselineHelpers-mips.h"
+#ifdef JS_ION_PERF
+# include "jit/PerfSpewer.h"
+#endif
+#include "jit/VMFunctions.h"
+
+#include "jit/ExecutionMode-inl.h"
+
+using namespace js;
+using namespace js::jit;
+
+static_assert(sizeof(uintptr_t) == sizeof(uint32_t), "Not 64-bit clean.");
+
+struct EnterJITRegs
+{
+ double f30;
+ double f28;
+ double f26;
+ double f24;
+ double f22;
+ double f20;
+
+ // empty slot for alignment
+ uintptr_t align;
+
+ // non-volatile registers.
+ uintptr_t ra;
+ uintptr_t s7;
+ uintptr_t s6;
+ uintptr_t s5;
+ uintptr_t s4;
+ uintptr_t s3;
+ uintptr_t s2;
+ uintptr_t s1;
+ uintptr_t s0;
+};
+
+struct EnterJITArgs
+{
+    // First 4 argument placeholders
+ void *jitcode; // <- sp points here when function is entered.
+ int maxArgc;
+ Value *maxArgv;
+ InterpreterFrame *fp;
+
+ // Arguments on stack
+ CalleeToken calleeToken;
+ JSObject *scopeChain;
+ size_t numStackValues;
+ Value *vp;
+};
+
+static void
+GenerateReturn(MacroAssembler &masm, int returnCode)
+{
+ MOZ_ASSERT(masm.framePushed() == sizeof(EnterJITRegs));
+
+ // Restore non-volatile registers
+ masm.loadPtr(Address(StackPointer, offsetof(EnterJITRegs, s0)), s0);
+ masm.loadPtr(Address(StackPointer, offsetof(EnterJITRegs, s1)), s1);
+ masm.loadPtr(Address(StackPointer, offsetof(EnterJITRegs, s2)), s2);
+ masm.loadPtr(Address(StackPointer, offsetof(EnterJITRegs, s3)), s3);
+ masm.loadPtr(Address(StackPointer, offsetof(EnterJITRegs, s4)), s4);
+ masm.loadPtr(Address(StackPointer, offsetof(EnterJITRegs, s5)), s5);
+ masm.loadPtr(Address(StackPointer, offsetof(EnterJITRegs, s6)), s6);
+ masm.loadPtr(Address(StackPointer, offsetof(EnterJITRegs, s7)), s7);
+ masm.loadPtr(Address(StackPointer, offsetof(EnterJITRegs, ra)), ra);
+
+ // Restore non-volatile floating point registers
+ masm.loadDouble(Address(StackPointer, offsetof(EnterJITRegs, f20)), f20);
+ masm.loadDouble(Address(StackPointer, offsetof(EnterJITRegs, f22)), f22);
+ masm.loadDouble(Address(StackPointer, offsetof(EnterJITRegs, f24)), f24);
+ masm.loadDouble(Address(StackPointer, offsetof(EnterJITRegs, f26)), f26);
+ masm.loadDouble(Address(StackPointer, offsetof(EnterJITRegs, f28)), f28);
+ masm.loadDouble(Address(StackPointer, offsetof(EnterJITRegs, f30)), f30);
+
+ masm.freeStack(sizeof(EnterJITRegs));
+
+ masm.branch(ra);
+}
+
+static void
+GeneratePrologue(MacroAssembler &masm)
+{
+ // Save non-volatile registers. These must be saved by the trampoline,
+ // rather than the JIT'd code, because they are scanned by the conservative
+ // scanner.
+ masm.reserveStack(sizeof(EnterJITRegs));
+ masm.storePtr(s0, Address(StackPointer, offsetof(EnterJITRegs, s0)));
+ masm.storePtr(s1, Address(StackPointer, offsetof(EnterJITRegs, s1)));
+ masm.storePtr(s2, Address(StackPointer, offsetof(EnterJITRegs, s2)));
+ masm.storePtr(s3, Address(StackPointer, offsetof(EnterJITRegs, s3)));
+ masm.storePtr(s4, Address(StackPointer, offsetof(EnterJITRegs, s4)));
+ masm.storePtr(s5, Address(StackPointer, offsetof(EnterJITRegs, s5)));
+ masm.storePtr(s6, Address(StackPointer, offsetof(EnterJITRegs, s6)));
+ masm.storePtr(s7, Address(StackPointer, offsetof(EnterJITRegs, s7)));
+ masm.storePtr(ra, Address(StackPointer, offsetof(EnterJITRegs, ra)));
+
+ masm.as_sd(f20, StackPointer, offsetof(EnterJITRegs, f20));
+ masm.as_sd(f22, StackPointer, offsetof(EnterJITRegs, f22));
+ masm.as_sd(f24, StackPointer, offsetof(EnterJITRegs, f24));
+ masm.as_sd(f26, StackPointer, offsetof(EnterJITRegs, f26));
+ masm.as_sd(f28, StackPointer, offsetof(EnterJITRegs, f28));
+ masm.as_sd(f30, StackPointer, offsetof(EnterJITRegs, f30));
+}
+
+
+/*
+ * This method generates a trampoline for a c++ function with the following
+ * signature:
+ * void enter(void *code, int argc, Value *argv, InterpreterFrame *fp,
+ * CalleeToken calleeToken, JSObject *scopeChain, Value *vp)
+ * ...using standard EABI calling convention
+ */
+JitCode *
+JitRuntime::generateEnterJIT(JSContext *cx, EnterJitType type)
+{
+ const Register reg_code = a0;
+ const Register reg_argc = a1;
+ const Register reg_argv = a2;
+ const Register reg_frame = a3;
+
+ MOZ_ASSERT(OsrFrameReg == reg_frame);
+
+ MacroAssembler masm(cx);
+ GeneratePrologue(masm);
+
+ const Address slotToken(sp, sizeof(EnterJITRegs) + offsetof(EnterJITArgs, calleeToken));
+ const Address slotVp(sp, sizeof(EnterJITRegs) + offsetof(EnterJITArgs, vp));
+
+ // Save stack pointer into s4
+ masm.movePtr(StackPointer, s4);
+
+ // Load calleeToken into s2.
+ masm.loadPtr(slotToken, s2);
+
+ // Save stack pointer as baseline frame.
+ if (type == EnterJitBaseline)
+ masm.movePtr(StackPointer, BaselineFrameReg);
+
+ // Load the number of actual arguments into s3.
+ masm.loadPtr(slotVp, s3);
+ masm.unboxInt32(Address(s3, 0), s3);
+
+ /***************************************************************
+ Loop over argv vector, push arguments onto stack in reverse order
+ ***************************************************************/
+
+ masm.as_sll(s0, reg_argc, 3); // s0 = argc * 8
+ masm.addPtr(reg_argv, s0); // s0 = argv + argc * 8
+
+ // Loop over arguments, copying them from an unknown buffer onto the Ion
+ // stack so they can be accessed from JIT'ed code.
+ Label header, footer;
+ // If there aren't any arguments, don't do anything
+ masm.ma_b(s0, reg_argv, &footer, Assembler::BelowOrEqual, ShortJump);
+ {
+ masm.bind(&header);
+
+ masm.subPtr(Imm32(2 * sizeof(uintptr_t)), s0);
+ masm.subPtr(Imm32(2 * sizeof(uintptr_t)), StackPointer);
+
+ ValueOperand value = ValueOperand(s6, s7);
+ masm.loadValue(Address(s0, 0), value);
+ masm.storeValue(value, Address(StackPointer, 0));
+
+ masm.ma_b(s0, reg_argv, &header, Assembler::Above, ShortJump);
+ }
+ masm.bind(&footer);
+
+ masm.subPtr(Imm32(2 * sizeof(uintptr_t)), StackPointer);
+ masm.storePtr(s3, Address(StackPointer, sizeof(uintptr_t))); // actual arguments
+ masm.storePtr(s2, Address(StackPointer, 0)); // callee token
+
+ masm.subPtr(StackPointer, s4);
+ masm.makeFrameDescriptor(s4, JitFrame_Entry);
+ masm.push(s4); // descriptor
+
+ CodeLabel returnLabel;
+ if (type == EnterJitBaseline) {
+ // Handle OSR.
+ GeneralRegisterSet regs(GeneralRegisterSet::All());
+ regs.take(JSReturnOperand);
+ regs.take(OsrFrameReg);
+ regs.take(BaselineFrameReg);
+ regs.take(reg_code);
+
+ const Address slotNumStackValues(BaselineFrameReg, sizeof(EnterJITRegs) +
+ offsetof(EnterJITArgs, numStackValues));
+ const Address slotScopeChain(BaselineFrameReg, sizeof(EnterJITRegs) +
+ offsetof(EnterJITArgs, scopeChain));
+
+ Label notOsr;
+ masm.ma_b(OsrFrameReg, OsrFrameReg, ¬Osr, Assembler::Zero, ShortJump);
+
+ Register scratch = regs.takeAny();
+
+ Register numStackValues = regs.takeAny();
+ masm.load32(slotNumStackValues, numStackValues);
+
+ // Push return address, previous frame pointer.
+ masm.subPtr(Imm32(2 * sizeof(uintptr_t)), StackPointer);
+ masm.ma_li(scratch, returnLabel.dest());
+ masm.storePtr(scratch, Address(StackPointer, sizeof(uintptr_t)));
+ masm.storePtr(BaselineFrameReg, Address(StackPointer, 0));
+
+ // Reserve frame.
+ Register framePtr = BaselineFrameReg;
+ masm.subPtr(Imm32(BaselineFrame::Size()), StackPointer);
+ masm.movePtr(StackPointer, framePtr);
+
+ // Reserve space for locals and stack values.
+ masm.ma_sll(scratch, numStackValues, Imm32(3));
+ masm.subPtr(scratch, StackPointer);
+
+ // Enter exit frame.
+ masm.addPtr(Imm32(BaselineFrame::Size() + BaselineFrame::FramePointerOffset), scratch);
+ masm.makeFrameDescriptor(scratch, JitFrame_BaselineJS);
+
+ // Push frame descriptor and fake return address.
+ masm.reserveStack(2 * sizeof(uintptr_t));
+ masm.storePtr(scratch, Address(StackPointer, sizeof(uintptr_t))); // Frame descriptor
+ masm.storePtr(zero, Address(StackPointer, 0)); // fake return address
+
+ masm.enterFakeExitFrame();
+
+ masm.reserveStack(2 * sizeof(uintptr_t));
+ masm.storePtr(framePtr, Address(StackPointer, sizeof(uintptr_t))); // BaselineFrame
+ masm.storePtr(reg_code, Address(StackPointer, 0)); // jitcode
+
+ masm.setupUnalignedABICall(3, scratch);
+ masm.passABIArg(BaselineFrameReg); // BaselineFrame
+ masm.passABIArg(OsrFrameReg); // InterpreterFrame
+ masm.passABIArg(numStackValues);
+ masm.callWithABI(JS_FUNC_TO_DATA_PTR(void *, jit::InitBaselineFrameForOsr));
+
+ Register jitcode = regs.takeAny();
+ masm.loadPtr(Address(StackPointer, 0), jitcode);
+ masm.loadPtr(Address(StackPointer, sizeof(uintptr_t)), framePtr);
+ masm.freeStack(2 * sizeof(uintptr_t));
+
+ MOZ_ASSERT(jitcode != ReturnReg);
+
+ Label error;
+ masm.freeStack(IonExitFrameLayout::SizeWithFooter());
+ masm.addPtr(Imm32(BaselineFrame::Size()), framePtr);
+ masm.branchIfFalseBool(ReturnReg, &error);
+
+ masm.jump(jitcode);
+
+ // OOM: load error value, discard return address and previous frame
+ // pointer and return.
+ masm.bind(&error);
+ masm.movePtr(framePtr, StackPointer);
+ masm.addPtr(Imm32(2 * sizeof(uintptr_t)), StackPointer);
+ masm.moveValue(MagicValue(JS_ION_ERROR), JSReturnOperand);
+ masm.ma_li(scratch, returnLabel.dest());
+ masm.jump(scratch);
+
+ masm.bind(¬Osr);
+ // Load the scope chain in R1.
+ MOZ_ASSERT(R1.scratchReg() != reg_code);
+ masm.loadPtr(slotScopeChain, R1.scratchReg());
+ }
+
+ // Call the function with pushing return address to stack.
+ masm.ma_callIonHalfPush(reg_code);
+
+ if (type == EnterJitBaseline) {
+ // Baseline OSR will return here.
+ masm.bind(returnLabel.src());
+ if (!masm.addCodeLabel(returnLabel))
+ return nullptr;
+ }
+
+ // Pop arguments off the stack.
+ // s0 <- 8*argc (size of all arguments we pushed on the stack)
+ masm.pop(s0);
+ masm.rshiftPtr(Imm32(4), s0);
+ masm.addPtr(s0, StackPointer);
+
+ // Store the returned value into the slotVp
+ masm.loadPtr(slotVp, s1);
+ masm.storeValue(JSReturnOperand, Address(s1, 0));
+
+ // Restore non-volatile registers and return.
+ GenerateReturn(masm, ShortJump);
+
+ Linker linker(masm);
+ AutoFlushICache afc("GenerateEnterJIT");
+ JitCode *code = linker.newCode<NoGC>(cx, JSC::OTHER_CODE);
+
+#ifdef JS_ION_PERF
+ writePerfSpewerJitCodeProfile(code, "EnterJIT");
+#endif
+
+ return code;
+}
+
+JitCode *
+JitRuntime::generateInvalidator(JSContext *cx)
+{
+ MacroAssembler masm(cx);
+
+ // NOTE: Members ionScript_ and osiPointReturnAddress_ of
+ // InvalidationBailoutStack are already on the stack.
+ static const uint32_t STACK_DATA_SIZE = sizeof(InvalidationBailoutStack) -
+ 2 * sizeof(uintptr_t);
+
+    // Stack has to be aligned here. If not, we will have to fix it.
+ masm.checkStackAlignment();
+
+ // Make room for data on stack.
+ masm.subPtr(Imm32(STACK_DATA_SIZE), StackPointer);
+
+ // Save general purpose registers
+ for (uint32_t i = 0; i < Registers::Total; i++) {
+ Address address = Address(StackPointer, InvalidationBailoutStack::offsetOfRegs() +
+ i * sizeof(uintptr_t));
+ masm.storePtr(Register::FromCode(i), address);
+ }
+
+ // Save floating point registers
+    // We can use as_sd because stack is aligned.
+ for (uint32_t i = 0; i < FloatRegisters::Total; i++)
+ masm.as_sd(FloatRegister::FromCode(i), StackPointer,
+ InvalidationBailoutStack::offsetOfFpRegs() + i * sizeof(double));
+
+ // Pass pointer to InvalidationBailoutStack structure.
+ masm.movePtr(StackPointer, a0);
+
+ // Reserve place for return value and BailoutInfo pointer
+ masm.subPtr(Imm32(2 * sizeof(uintptr_t)), StackPointer);
+ // Pass pointer to return value.
+ masm.ma_addu(a1, StackPointer, Imm32(sizeof(uintptr_t)));
+ // Pass pointer to BailoutInfo
+ masm.movePtr(StackPointer, a2);
+
+ masm.setupAlignedABICall(3);
+ masm.passABIArg(a0);
+ masm.passABIArg(a1);
+ masm.passABIArg(a2);
+ masm.callWithABI(JS_FUNC_TO_DATA_PTR(void *, InvalidationBailout));
+
+ masm.loadPtr(Address(StackPointer, 0), a2);
+ masm.loadPtr(Address(StackPointer, sizeof(uintptr_t)), a1);
+ // Remove the return address, the IonScript, the register state
+    // (InvalidationBailoutStack) and the space that was allocated for the
+ // return value.
+ masm.addPtr(Imm32(sizeof(InvalidationBailoutStack) + 2 * sizeof(uintptr_t)), StackPointer);
+ // remove the space that this frame was using before the bailout
+ // (computed by InvalidationBailout)
+ masm.addPtr(a1, StackPointer);
+
+    // Jump to shared bailout tail. The BailoutInfo pointer has to be in a2.
+ JitCode *bailoutTail = cx->runtime()->jitRuntime()->getBailoutTail();
+ masm.branch(bailoutTail);
+
+ Linker linker(masm);
+ AutoFlushICache afc("Invalidator");
+ JitCode *code = linker.newCode<NoGC>(cx, JSC::OTHER_CODE);
+ IonSpew(IonSpew_Invalidate, " invalidation thunk created at %p", (void *) code->raw());
+
+#ifdef JS_ION_PERF
+ writePerfSpewerJitCodeProfile(code, "Invalidator");
+#endif
+
+ return code;
+}
+
+JitCode *
+JitRuntime::generateArgumentsRectifier(JSContext *cx, ExecutionMode mode, void **returnAddrOut)
+{
+ MacroAssembler masm(cx);
+
+ // ArgumentsRectifierReg contains the |nargs| pushed onto the current
+ // frame. Including |this|, there are (|nargs| + 1) arguments to copy.
+ MOZ_ASSERT(ArgumentsRectifierReg == s3);
+
+ Register numActArgsReg = t6;
+ Register calleeTokenReg = t7;
+ Register numArgsReg = t5;
+
+ // Copy number of actual arguments into numActArgsReg
+ masm.loadPtr(Address(StackPointer, IonRectifierFrameLayout::offsetOfNumActualArgs()),
+ numActArgsReg);
+
+ // Load the number of |undefined|s to push into t1.
+ masm.loadPtr(Address(StackPointer, IonRectifierFrameLayout::offsetOfCalleeToken()),
+ calleeTokenReg);
+ masm.load16ZeroExtend(Address(calleeTokenReg, JSFunction::offsetOfNargs()), numArgsReg);
+
+ masm.ma_subu(t1, numArgsReg, s3);
+
+ masm.moveValue(UndefinedValue(), ValueOperand(t3, t4));
+
+ masm.movePtr(StackPointer, t2); // Save %sp.
+
+ // Push undefined.
+ {
+ Label undefLoopTop;
+ masm.bind(&undefLoopTop);
+
+ masm.subPtr(Imm32(sizeof(Value)), StackPointer);
+ masm.storeValue(ValueOperand(t3, t4), Address(StackPointer, 0));
+ masm.sub32(Imm32(1), t1);
+
+ masm.ma_b(t1, t1, &undefLoopTop, Assembler::NonZero, ShortJump);
+ }
+
+ // Get the topmost argument.
+ masm.ma_sll(t0, s3, Imm32(3)); // t0 <- nargs * 8
+ masm.addPtr(t0, t2); // t2 <- t2(saved sp) + nargs * 8
+ masm.addPtr(Imm32(sizeof(IonRectifierFrameLayout)), t2);
+
+ // Push arguments, |nargs| + 1 times (to include |this|).
+ {
+ Label copyLoopTop, initialSkip;
+
+ masm.ma_b(&initialSkip, ShortJump);
+
+ masm.bind(©LoopTop);
+ masm.subPtr(Imm32(sizeof(Value)), t2);
+ masm.sub32(Imm32(1), s3);
+
+ masm.bind(&initialSkip);
+
+ MOZ_ASSERT(sizeof(Value) == 2 * sizeof(uint32_t));
+ // Read argument and push to stack.
+ masm.subPtr(Imm32(sizeof(Value)), StackPointer);
+ masm.load32(Address(t2, NUNBOX32_TYPE_OFFSET), t0);
+ masm.store32(t0, Address(StackPointer, NUNBOX32_TYPE_OFFSET));
+ masm.load32(Address(t2, NUNBOX32_PAYLOAD_OFFSET), t0);
+ masm.store32(t0, Address(StackPointer, NUNBOX32_PAYLOAD_OFFSET));
+
+ masm.ma_b(s3, s3, ©LoopTop, Assembler::NonZero, ShortJump);
+ }
+
+ // translate the framesize from values into bytes
+ masm.ma_addu(t0, numArgsReg, Imm32(1));
+ masm.lshiftPtr(Imm32(3), t0);
+
+ // Construct sizeDescriptor.
+ masm.makeFrameDescriptor(t0, JitFrame_Rectifier);
+
+ // Construct IonJSFrameLayout.
+ masm.subPtr(Imm32(3 * sizeof(uintptr_t)), StackPointer);
+ // Push actual arguments.
+ masm.storePtr(numActArgsReg, Address(StackPointer, 2 * sizeof(uintptr_t)));
+ // Push callee token.
+ masm.storePtr(calleeTokenReg, Address(StackPointer, sizeof(uintptr_t)));
+ // Push frame descriptor.
+ masm.storePtr(t0, Address(StackPointer, 0));
+
+ // Call the target function.
+ // Note that this code assumes the function is JITted.
+ masm.loadPtr(Address(calleeTokenReg, JSFunction::offsetOfNativeOrScript()), t1);
+ masm.loadBaselineOrIonRaw(t1, t1, mode, nullptr);
+ masm.ma_callIonHalfPush(t1);
+
+ uint32_t returnOffset = masm.currentOffset();
+
+ // arg1
+ // ...
+ // argN
+ // num actual args
+ // callee token
+ // sizeDescriptor <- sp now
+ // return address
+
+ // Remove the rectifier frame.
+ // t0 <- descriptor with FrameType.
+ masm.loadPtr(Address(StackPointer, 0), t0);
+ masm.rshiftPtr(Imm32(FRAMESIZE_SHIFT), t0); // t0 <- descriptor.
+
+ // Discard descriptor, calleeToken and number of actual arguments.
+ masm.addPtr(Imm32(3 * sizeof(uintptr_t)), StackPointer);
+
+ // arg1
+ // ...
+ // argN <- sp now; t0 <- frame descriptor
+ // num actual args
+ // callee token
+ // sizeDescriptor
+ // return address
+
+ // Discard pushed arguments.
+ masm.addPtr(t0, StackPointer);
+
+ masm.ret();
+ Linker linker(masm);
+ AutoFlushICache afc("ArgumentsRectifier");
+ JitCode *code = linker.newCode<NoGC>(cx, JSC::OTHER_CODE);
+
+ CodeOffsetLabel returnLabel(returnOffset);
+ returnLabel.fixup(&masm);
+ if (returnAddrOut)
+ *returnAddrOut = (void *) (code->raw() + returnLabel.offset());
+
+#ifdef JS_ION_PERF
+ writePerfSpewerJitCodeProfile(code, "ArgumentsRectifier");
+#endif
+
+ return code;
+}
+
+/* There are two different stack layouts when doing bailout. They are
+ * represented via class BailoutStack.
+ *
+ * - First case is when bailout is done through bailout table. In this case
+ * table offset is stored in $ra (look at JitRuntime::generateBailoutTable())
+ * and thunk code should save it on stack. In this case frameClassId_ cannot
+ * be NO_FRAME_SIZE_CLASS_ID. Members snapshotOffset_ and padding_ are not on
+ * the stack.
+ *
+ * - Other case is when bailout is done via out of line code (lazy bailout).
+ * In this case frame size is stored in $ra (look at
+ * CodeGeneratorMIPS::generateOutOfLineCode()) and thunk code should save it
+ * on stack. Other difference is that members snapshotOffset_ and padding_ are
+ * pushed to the stack by CodeGeneratorMIPS::visitOutOfLineBailout(). Field
+ * frameClassId_ is forced to be NO_FRAME_SIZE_CLASS_ID
+ * (See: JitRuntime::generateBailoutHandler).
+ */
+static void
+GenerateBailoutThunk(JSContext *cx, MacroAssembler &masm, uint32_t frameClass)
+{
+ // NOTE: Members snapshotOffset_ and padding_ of BailoutStack
+ // are not stored in this function.
+ static const uint32_t bailoutDataSize = sizeof(BailoutStack) - 2 * sizeof(uintptr_t);
+ static const uint32_t bailoutInfoOutParamSize = 2 * sizeof(uintptr_t);
+
+ // Make sure that alignment is proper.
+ masm.checkStackAlignment();
+
+ // Make room for data.
+ masm.subPtr(Imm32(bailoutDataSize), StackPointer);
+
+ // Save general purpose registers.
+ for (uint32_t i = 0; i < Registers::Total; i++) {
+ uint32_t off = BailoutStack::offsetOfRegs() + i * sizeof(uintptr_t);
+ masm.storePtr(Register::FromCode(i), Address(StackPointer, off));
+ }
+
+ // Save floating point registers
+    // We can use as_sd because stack is aligned.
+ for (uintptr_t i = 0; i < FloatRegisters::Total; i++)
+ masm.as_sd(FloatRegister::FromCode(i), StackPointer,
+ BailoutStack::offsetOfFpRegs() + i * sizeof(double));
+
+ // Store the frameSize_ or tableOffset_ stored in ra
+ // See: JitRuntime::generateBailoutTable()
+ // See: CodeGeneratorMIPS::generateOutOfLineCode()
+ masm.storePtr(ra, Address(StackPointer, BailoutStack::offsetOfFrameSize()));
+
+ // Put frame class to stack
+ masm.storePtr(ImmWord(frameClass), Address(StackPointer, BailoutStack::offsetOfFrameClass()));
+
+ // Put pointer to BailoutStack as first argument to the Bailout()
+ masm.movePtr(StackPointer, a0);
+ // Put pointer to BailoutInfo
+ masm.subPtr(Imm32(bailoutInfoOutParamSize), StackPointer);
+ masm.storePtr(ImmPtr(nullptr), Address(StackPointer, 0));
+ masm.movePtr(StackPointer, a1);
+
+ masm.setupAlignedABICall(2);
+ masm.passABIArg(a0);
+ masm.passABIArg(a1);
+ masm.callWithABI(JS_FUNC_TO_DATA_PTR(void *, Bailout));
+
+ // Get BailoutInfo pointer
+ masm.loadPtr(Address(StackPointer, 0), a2);
+
+ // Remove both the bailout frame and the topmost Ion frame's stack.
+ if (frameClass == NO_FRAME_SIZE_CLASS_ID) {
+ // Load frameSize from stack
+ masm.loadPtr(Address(StackPointer,
+ bailoutInfoOutParamSize + BailoutStack::offsetOfFrameSize()), a1);
+
+ // Remove complete BailoutStack class and data after it
+ masm.addPtr(Imm32(sizeof(BailoutStack) + bailoutInfoOutParamSize), StackPointer);
+        // Remove frame size from stack
+ masm.addPtr(a1, StackPointer);
+ } else {
+ uint32_t frameSize = FrameSizeClass::FromClass(frameClass).frameSize();
+        // Remove the data this function added and frame size.
+ masm.addPtr(Imm32(bailoutDataSize + bailoutInfoOutParamSize + frameSize), StackPointer);
+ }
+
+ // Jump to shared bailout tail. The BailoutInfo pointer has to be in a2.
+ JitCode *bailoutTail = cx->runtime()->jitRuntime()->getBailoutTail();
+ masm.branch(bailoutTail);
+}
+
+JitCode *
+JitRuntime::generateBailoutTable(JSContext *cx, uint32_t frameClass)
+{
+ MacroAssembler masm(cx);
+
+ Label bailout;
+ for (size_t i = 0; i < BAILOUT_TABLE_SIZE; i++) {
+ // Calculate offset to the end of table
+ int32_t offset = (BAILOUT_TABLE_SIZE - i) * BAILOUT_TABLE_ENTRY_SIZE;
+
+ // We use the 'ra' as table offset later in GenerateBailoutThunk
+ masm.as_bal(BOffImm16(offset));
+ masm.nop();
+ }
+ masm.bind(&bailout);
+
+ GenerateBailoutThunk(cx, masm, frameClass);
+
+ Linker linker(masm);
+ AutoFlushICache afc("BailoutTable");
+ JitCode *code = linker.newCode<NoGC>(cx, JSC::OTHER_CODE);
+
+#ifdef JS_ION_PERF
+ writePerfSpewerJitCodeProfile(code, "BailoutTable");
+#endif
+
+ return code;
+}
+
+JitCode *
+JitRuntime::generateBailoutHandler(JSContext *cx)
+{
+ MacroAssembler masm(cx);
+ GenerateBailoutThunk(cx, masm, NO_FRAME_SIZE_CLASS_ID);
+
+ Linker linker(masm);
+ AutoFlushICache afc("BailoutHandler");
+ JitCode *code = linker.newCode<NoGC>(cx, JSC::OTHER_CODE);
+
+#ifdef JS_ION_PERF
+ writePerfSpewerJitCodeProfile(code, "BailoutHandler");
+#endif
+
+ return code;
+}
+
+JitCode *
+JitRuntime::generateVMWrapper(JSContext *cx, const VMFunction &f)
+{
+ MOZ_ASSERT(functionWrappers_);
+ MOZ_ASSERT(functionWrappers_->initialized());
+ VMWrapperMap::AddPtr p = functionWrappers_->lookupForAdd(&f);
+ if (p)
+ return p->value();
+
+ MacroAssembler masm(cx);
+
+ GeneralRegisterSet regs = GeneralRegisterSet(Register::Codes::WrapperMask);
+
+ static_assert((Register::Codes::VolatileMask & ~Register::Codes::WrapperMask) == 0,
+ "Wrapper register set should be a superset of Volatile register set.");
+
+ // The context is the first argument; a0 is the first argument register.
+ Register cxreg = a0;
+ regs.take(cxreg);
+
+ // We're aligned to an exit frame, so link it up.
+ masm.enterExitFrameAndLoadContext(&f, cxreg, regs.getAny(), f.executionMode);
+
+ // Save the base of the argument set stored on the stack.
+ Register argsBase = InvalidReg;
+ if (f.explicitArgs) {
+ argsBase = t1; // Use temporary register.
+ regs.take(argsBase);
+ masm.ma_addu(argsBase, StackPointer, Imm32(IonExitFrameLayout::SizeWithFooter()));
+ }
+
+ // Reserve space for the outparameter.
+ Register outReg = InvalidReg;
+ switch (f.outParam) {
+ case Type_Value:
+ outReg = t0; // Use temporary register.
+ regs.take(outReg);
+ // Value outparam has to be 8 byte aligned because the called
+ // function can use sdc1 or ldc1 instructions to access it.
+ masm.reserveStack((StackAlignment - sizeof(uintptr_t)) + sizeof(Value));
+ masm.alignPointerUp(StackPointer, outReg, StackAlignment);
+ break;
+
+ case Type_Handle:
+ outReg = t0;
+ regs.take(outReg);
+ if (f.outParamRootType == VMFunction::RootValue) {
+ // Value outparam has to be 8 byte aligned because the called
+ // function can use sdc1 or ldc1 instructions to access it.
+ masm.reserveStack((StackAlignment - sizeof(uintptr_t)) + sizeof(Value));
+ masm.alignPointerUp(StackPointer, outReg, StackAlignment);
+ masm.storeValue(UndefinedValue(), Address(outReg, 0));
+ }
+ else {
+ masm.PushEmptyRooted(f.outParamRootType);
+ masm.movePtr(StackPointer, outReg);
+ }
+ break;
+
+ case Type_Bool:
+ case Type_Int32:
+ MOZ_ASSERT(sizeof(uintptr_t) == sizeof(uint32_t));
+ case Type_Pointer:
+ outReg = t0;
+ regs.take(outReg);
+ masm.reserveStack(sizeof(uintptr_t));
+ masm.movePtr(StackPointer, outReg);
+ break;
+
+ case Type_Double:
+ outReg = t0;
+ regs.take(outReg);
+ // Double outparam has to be 8 byte aligned because the called
+ // function can use sdc1 or ldc1 instructions to access it.
+ masm.reserveStack((StackAlignment - sizeof(uintptr_t)) + sizeof(double));
+ masm.alignPointerUp(StackPointer, outReg, StackAlignment);
+ break;
+
+ default:
+ MOZ_ASSERT(f.outParam == Type_Void);
+ break;
+ }
+
+ masm.setupUnalignedABICall(f.argc(), regs.getAny());
+ masm.passABIArg(cxreg);
+
+ size_t argDisp = 0;
+
+ // Copy any arguments.
+ for (uint32_t explicitArg = 0; explicitArg < f.explicitArgs; explicitArg++) {
+ MoveOperand from;
+ switch (f.argProperties(explicitArg)) {
+ case VMFunction::WordByValue:
+ masm.passABIArg(MoveOperand(argsBase, argDisp), MoveOp::GENERAL);
+ argDisp += sizeof(uint32_t);
+ break;
+ case VMFunction::DoubleByValue:
+ // Values should be passed by reference, not by value, so we
+ // assert that the argument is a double-precision float.
+ MOZ_ASSERT(f.argPassedInFloatReg(explicitArg));
+ masm.passABIArg(MoveOperand(argsBase, argDisp), MoveOp::DOUBLE);
+ argDisp += sizeof(double);
+ break;
+ case VMFunction::WordByRef:
+ masm.passABIArg(MoveOperand(argsBase, argDisp, MoveOperand::EFFECTIVE_ADDRESS),
+ MoveOp::GENERAL);
+ argDisp += sizeof(uint32_t);
+ break;
+ case VMFunction::DoubleByRef:
+ masm.passABIArg(MoveOperand(argsBase, argDisp, MoveOperand::EFFECTIVE_ADDRESS),
+ MoveOp::GENERAL);
+ argDisp += sizeof(double);
+ break;
+ }
+ }
+
+ // Copy the implicit outparam, if any.
+ if (outReg != InvalidReg)
+ masm.passABIArg(outReg);
+
+ masm.callWithABI(f.wrapped);
+
+ // Test for failure.
+ switch (f.failType()) {
+ case Type_Object:
+ masm.branchTestPtr(Assembler::Zero, v0, v0, masm.failureLabel(f.executionMode));
+ break;
+ case Type_Bool:
+ // Called functions return bools, which are 0/false and non-zero/true
+ masm.branchIfFalseBool(v0, masm.failureLabel(f.executionMode));
+ break;
+ default:
+ MOZ_ASSUME_UNREACHABLE("unknown failure kind");
+ }
+
+ // Load the outparam and free any allocated stack.
+ switch (f.outParam) {
+ case Type_Handle:
+ if (f.outParamRootType == VMFunction::RootValue) {
+ masm.alignPointerUp(StackPointer, SecondScratchReg, StackAlignment);
+ masm.loadValue(Address(SecondScratchReg, 0), JSReturnOperand);
+ masm.freeStack((StackAlignment - sizeof(uintptr_t)) + sizeof(Value));
+ }
+ else {
+ masm.popRooted(f.outParamRootType, ReturnReg, JSReturnOperand);
+ }
+ break;
+
+ case Type_Value:
+ masm.alignPointerUp(StackPointer, SecondScratchReg, StackAlignment);
+ masm.loadValue(Address(SecondScratchReg, 0), JSReturnOperand);
+ masm.freeStack((StackAlignment - sizeof(uintptr_t)) + sizeof(Value));
+ break;
+
+ case Type_Int32:
+ MOZ_ASSERT(sizeof(uintptr_t) == sizeof(uint32_t));
+ case Type_Pointer:
+ masm.load32(Address(StackPointer, 0), ReturnReg);
+ masm.freeStack(sizeof(uintptr_t));
+ break;
+
+ case Type_Bool:
+ masm.load8ZeroExtend(Address(StackPointer, 0), ReturnReg);
+ masm.freeStack(sizeof(uintptr_t));
+ break;
+
+ case Type_Double:
+ if (cx->runtime()->jitSupportsFloatingPoint) {
+ masm.alignPointerUp(StackPointer, SecondScratchReg, StackAlignment);
+ // Address is aligned, so we can use as_ld.
+ masm.as_ld(ReturnFloatReg, SecondScratchReg, 0);
+ } else {
+ masm.assumeUnreachable("Unable to load into float reg, with no FP support.");
+ }
+ masm.freeStack((StackAlignment - sizeof(uintptr_t)) + sizeof(double));
+ break;
+
+ default:
+ MOZ_ASSERT(f.outParam == Type_Void);
+ break;
+ }
+ masm.leaveExitFrame();
+ masm.retn(Imm32(sizeof(IonExitFrameLayout) +
+ f.explicitStackSlots() * sizeof(uintptr_t) +
+ f.extraValuesToPop * sizeof(Value)));
+
+ Linker linker(masm);
+ AutoFlushICache afc("VMWrapper");
+ JitCode *wrapper = linker.newCode<NoGC>(cx, JSC::OTHER_CODE);
+ if (!wrapper)
+ return nullptr;
+
+ // linker.newCode may trigger a GC and sweep functionWrappers_ so we have
+ // to use relookupOrAdd instead of add.
+ if (!functionWrappers_->relookupOrAdd(p, &f, wrapper))
+ return nullptr;
+
+#ifdef JS_ION_PERF
+ writePerfSpewerJitCodeProfile(wrapper, "VMWrapper");
+#endif
+
+ return wrapper;
+}
+
+JitCode *
+JitRuntime::generatePreBarrier(JSContext *cx, MIRType type)
+{
+ MacroAssembler masm(cx);
+
+ RegisterSet save;
+ if (cx->runtime()->jitSupportsFloatingPoint) {
+ save = RegisterSet(GeneralRegisterSet(Registers::VolatileMask),
+ FloatRegisterSet(FloatRegisters::VolatileMask));
+ } else {
+ save = RegisterSet(GeneralRegisterSet(Registers::VolatileMask),
+ FloatRegisterSet());
+ }
+ masm.PushRegsInMask(save);
+
+ MOZ_ASSERT(PreBarrierReg == a1);
+ masm.movePtr(ImmPtr(cx->runtime()), a0);
+
+ masm.setupUnalignedABICall(2, a2);
+ masm.passABIArg(a0);
+ masm.passABIArg(a1);
+
+ if (type == MIRType_Value) {
+ masm.callWithABI(JS_FUNC_TO_DATA_PTR(void *, MarkValueFromIon));
+ } else {
+ MOZ_ASSERT(type == MIRType_Shape);
+ masm.callWithABI(JS_FUNC_TO_DATA_PTR(void *, MarkShapeFromIon));
+ }
+
+ masm.PopRegsInMask(save);
+ masm.ret();
+
+ Linker linker(masm);
+ AutoFlushICache afc("PreBarrier");
+ JitCode *code = linker.newCode<NoGC>(cx, JSC::OTHER_CODE);
+
+#ifdef JS_ION_PERF
+ writePerfSpewerJitCodeProfile(code, "PreBarrier");
+#endif
+
+ return code;
+}
+
+typedef bool (*HandleDebugTrapFn)(JSContext *, BaselineFrame *, uint8_t *, bool *);
+static const VMFunction HandleDebugTrapInfo = FunctionInfo<HandleDebugTrapFn>(HandleDebugTrap);
+
+JitCode *
+JitRuntime::generateDebugTrapHandler(JSContext *cx)
+{
+ MacroAssembler masm(cx);
+
+ Register scratch1 = t0;
+ Register scratch2 = t1;
+
+ // Load BaselineFrame pointer in scratch1.
+ masm.movePtr(s5, scratch1);
+ masm.subPtr(Imm32(BaselineFrame::Size()), scratch1);
+
+ // Enter a stub frame and call the HandleDebugTrap VM function. Ensure
+ // the stub frame has a nullptr ICStub pointer, since this pointer is
+ // marked during GC.
+ masm.movePtr(ImmPtr(nullptr), BaselineStubReg);
+ EmitEnterStubFrame(masm, scratch2);
+
+ JitCode *code = cx->runtime()->jitRuntime()->getVMWrapper(HandleDebugTrapInfo);
+ if (!code)
+ return nullptr;
+
+ masm.subPtr(Imm32(2 * sizeof(uintptr_t)), StackPointer);
+ masm.storePtr(ra, Address(StackPointer, sizeof(uintptr_t)));
+ masm.storePtr(scratch1, Address(StackPointer, 0));
+
+ EmitCallVM(code, masm);
+
+ EmitLeaveStubFrame(masm);
+
+ // If the stub returns |true|, we have to perform a forced return
+ // (return from the JS frame). If the stub returns |false|, just return
+ // from the trap stub so that execution continues at the current pc.
+ Label forcedReturn;
+ masm.branchTest32(Assembler::NonZero, ReturnReg, ReturnReg, &forcedReturn);
+
+ // ra was restored by EmitLeaveStubFrame
+ masm.branch(ra);
+
+ masm.bind(&forcedReturn);
+ masm.loadValue(Address(s5, BaselineFrame::reverseOffsetOfReturnValue()),
+ JSReturnOperand);
+ masm.movePtr(s5, StackPointer);
+ masm.pop(s5);
+ masm.ret();
+
+ Linker linker(masm);
+ AutoFlushICache afc("DebugTrapHandler");
+ JitCode *codeDbg = linker.newCode<NoGC>(cx, JSC::OTHER_CODE);
+
+#ifdef JS_ION_PERF
+ writePerfSpewerJitCodeProfile(codeDbg, "DebugTrapHandler");
+#endif
+
+ return codeDbg;
+}
+
+
+JitCode *
+JitRuntime::generateExceptionTailStub(JSContext *cx)
+{
+ MacroAssembler masm;
+
+ masm.handleFailureWithHandlerTail();
+
+ Linker linker(masm);
+ AutoFlushICache afc("ExceptionTailStub");
+ JitCode *code = linker.newCode<NoGC>(cx, JSC::OTHER_CODE);
+
+#ifdef JS_ION_PERF
+ writePerfSpewerJitCodeProfile(code, "ExceptionTailStub");
+#endif
+
+ return code;
+}
+
+JitCode *
+JitRuntime::generateBailoutTailStub(JSContext *cx)
+{
+ MacroAssembler masm;
+
+ masm.generateBailoutTail(a1, a2);
+
+ Linker linker(masm);
+ AutoFlushICache afc("BailoutTailStub");
+ JitCode *code = linker.newCode<NoGC>(cx, JSC::OTHER_CODE);
+
+#ifdef JS_ION_PERF
+ writePerfSpewerJitCodeProfile(code, "BailoutTailStub");
+#endif
+
+ return code;
+}
+