| // Copyright 2012 the V8 project authors. All rights reserved. |
| // Use of this source code is governed by a BSD-style license that can be |
| // found in the LICENSE file. |
| |
| #include "src/regexp/jsregexp.h" |
| |
| #include <memory> |
| #include <vector> |
| |
| #include "src/base/platform/platform.h" |
| #include "src/compilation-cache.h" |
| #include "src/elements.h" |
| #include "src/execution.h" |
| #include "src/factory.h" |
| #include "src/isolate-inl.h" |
| #include "src/messages.h" |
| #include "src/ostreams.h" |
| #include "src/regexp/interpreter-irregexp.h" |
| #include "src/regexp/jsregexp-inl.h" |
| #include "src/regexp/regexp-macro-assembler-irregexp.h" |
| #include "src/regexp/regexp-macro-assembler-tracer.h" |
| #include "src/regexp/regexp-macro-assembler.h" |
| #include "src/regexp/regexp-parser.h" |
| #include "src/regexp/regexp-stack.h" |
| #include "src/runtime/runtime.h" |
| #include "src/splay-tree-inl.h" |
| #include "src/string-search.h" |
| #include "src/unicode-decoder.h" |
| #include "src/unicode-inl.h" |
| |
| #ifdef V8_INTL_SUPPORT |
| #include "unicode/uniset.h" |
| #include "unicode/utypes.h" |
| #endif // V8_INTL_SUPPORT |
| |
| #ifndef V8_INTERPRETED_REGEXP |
| #if V8_TARGET_ARCH_IA32 |
| #include "src/regexp/ia32/regexp-macro-assembler-ia32.h" |
| #elif V8_TARGET_ARCH_X64 |
| #include "src/regexp/x64/regexp-macro-assembler-x64.h" |
| #elif V8_TARGET_ARCH_ARM64 |
| #include "src/regexp/arm64/regexp-macro-assembler-arm64.h" |
| #elif V8_TARGET_ARCH_ARM |
| #include "src/regexp/arm/regexp-macro-assembler-arm.h" |
| #elif V8_TARGET_ARCH_PPC |
| #include "src/regexp/ppc/regexp-macro-assembler-ppc.h" |
| #elif V8_TARGET_ARCH_S390 |
| #include "src/regexp/s390/regexp-macro-assembler-s390.h" |
| #elif V8_TARGET_ARCH_MIPS |
| #include "src/regexp/mips/regexp-macro-assembler-mips.h" |
| #elif V8_TARGET_ARCH_MIPS64 |
| #include "src/regexp/mips64/regexp-macro-assembler-mips64.h" |
| #else |
| #error Unsupported target architecture. |
| #endif |
| #endif |
| |
| |
| namespace v8 { |
| namespace internal { |
| |
| MUST_USE_RESULT |
| static inline MaybeHandle<Object> ThrowRegExpException( |
| Handle<JSRegExp> re, Handle<String> pattern, Handle<String> error_text) { |
| Isolate* isolate = re->GetIsolate(); |
| THROW_NEW_ERROR(isolate, NewSyntaxError(MessageTemplate::kMalformedRegExp, |
| pattern, error_text), |
| Object); |
| } |
| |
| |
| inline void ThrowRegExpException(Handle<JSRegExp> re, |
| Handle<String> error_text) { |
| USE(ThrowRegExpException(re, Handle<String>(re->Pattern()), error_text)); |
| } |
| |
| |
| ContainedInLattice AddRange(ContainedInLattice containment, |
| const int* ranges, |
| int ranges_length, |
| Interval new_range) { |
| DCHECK_EQ(1, ranges_length & 1); |
| DCHECK_EQ(String::kMaxCodePoint + 1, ranges[ranges_length - 1]); |
| if (containment == kLatticeUnknown) return containment; |
| bool inside = false; |
| int last = 0; |
| for (int i = 0; i < ranges_length; inside = !inside, last = ranges[i], i++) { |
| // Consider the range from last to ranges[i]. |
| // We haven't got to the new range yet. |
| if (ranges[i] <= new_range.from()) continue; |
| // New range is wholly inside last-ranges[i]. Note that new_range.to() is |
| // inclusive, but the values in ranges are not. |
| if (last <= new_range.from() && new_range.to() < ranges[i]) { |
| return Combine(containment, inside ? kLatticeIn : kLatticeOut); |
| } |
| return kLatticeUnknown; |
| } |
| return containment; |
| } |
| |
| // A higher value makes code generation slower, a lower one makes the V8 |
| // benchmark score lower. |
| const int kMaxLookaheadForBoyerMoore = 8; |
| // In a 3-character pattern you can step forwards at most 3 characters at a |
| // time, which is not always enough to pay for the extra logic. |
| const int kPatternTooShortForBoyerMoore = 2; |
| |
| // Identifies the sort of regexps where the regexp engine is faster |
| // than the code used for atom matches. |
| static bool HasFewDifferentCharacters(Handle<String> pattern) { |
| int length = Min(kMaxLookaheadForBoyerMoore, pattern->length()); |
| if (length <= kPatternTooShortForBoyerMoore) return false; |
| const int kMod = 128; |
| bool character_found[kMod]; |
| int different = 0; |
| memset(&character_found[0], 0, sizeof(character_found)); |
| for (int i = 0; i < length; i++) { |
| int ch = (pattern->Get(i) & (kMod - 1)); |
| if (!character_found[ch]) { |
| character_found[ch] = true; |
| different++; |
| // We declare a regexp low-alphabet if it has at least 3 times as many |
| // characters as it has different characters. |
| if (different * 3 > length) return false; |
| } |
| } |
| return true; |
| } |
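| |
| // For example, HasFewDifferentCharacters is false for "abcabcab" (three |
| // different characters within the first eight, and 3 * 3 > 8) but true for |
| // "aabbaabb" (only two different characters, and 2 * 3 <= 8). |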
| |
| // Generic RegExp methods. Dispatches to implementation specific methods. |
| |
| MaybeHandle<Object> RegExpImpl::Compile(Handle<JSRegExp> re, |
| Handle<String> pattern, |
| JSRegExp::Flags flags) { |
| DCHECK(pattern->IsFlat()); |
| |
| Isolate* isolate = re->GetIsolate(); |
| Zone zone(isolate->allocator(), ZONE_NAME); |
| CompilationCache* compilation_cache = isolate->compilation_cache(); |
| MaybeHandle<FixedArray> maybe_cached = |
| compilation_cache->LookupRegExp(pattern, flags); |
| Handle<FixedArray> cached; |
| if (maybe_cached.ToHandle(&cached)) { |
| re->set_data(*cached); |
| return re; |
| } |
| |
| PostponeInterruptsScope postpone(isolate); |
| RegExpCompileData parse_result; |
| FlatStringReader reader(isolate, pattern); |
| DCHECK(!isolate->has_pending_exception()); |
| if (!RegExpParser::ParseRegExp(isolate, &zone, &reader, flags, |
| &parse_result)) { |
| // Throw an exception if we fail to parse the pattern. |
| return ThrowRegExpException(re, pattern, parse_result.error); |
| } |
| |
| bool has_been_compiled = false; |
| |
| if (parse_result.simple && !IgnoreCase(flags) && !IsSticky(flags) && |
| !HasFewDifferentCharacters(pattern)) { |
| // Parse-tree is a single atom that is equal to the pattern. |
| AtomCompile(re, pattern, flags, pattern); |
| has_been_compiled = true; |
| } else if (parse_result.tree->IsAtom() && !IsSticky(flags) && |
| parse_result.capture_count == 0) { |
| RegExpAtom* atom = parse_result.tree->AsAtom(); |
| Vector<const uc16> atom_pattern = atom->data(); |
| Handle<String> atom_string; |
| ASSIGN_RETURN_ON_EXCEPTION( |
| isolate, atom_string, |
| isolate->factory()->NewStringFromTwoByte(atom_pattern), Object); |
| if (!IgnoreCase(atom->flags()) && !HasFewDifferentCharacters(atom_string)) { |
| AtomCompile(re, pattern, flags, atom_string); |
| has_been_compiled = true; |
| } |
| } |
| if (!has_been_compiled) { |
| IrregexpInitialize(re, pattern, flags, parse_result.capture_count); |
| } |
| DCHECK(re->data()->IsFixedArray()); |
| // Compilation succeeded so the data is set on the regexp |
| // and we can store it in the cache. |
| Handle<FixedArray> data(FixedArray::cast(re->data())); |
| compilation_cache->PutRegExp(pattern, flags, data); |
| |
| return re; |
| } |
| |
| MaybeHandle<Object> RegExpImpl::Exec(Handle<JSRegExp> regexp, |
| Handle<String> subject, int index, |
| Handle<RegExpMatchInfo> last_match_info) { |
| switch (regexp->TypeTag()) { |
| case JSRegExp::ATOM: |
| return AtomExec(regexp, subject, index, last_match_info); |
| case JSRegExp::IRREGEXP: { |
| return IrregexpExec(regexp, subject, index, last_match_info); |
| } |
| default: |
| UNREACHABLE(); |
| } |
| } |
| |
| |
| // RegExp Atom implementation: Simple string search using indexOf. |
| |
| |
| void RegExpImpl::AtomCompile(Handle<JSRegExp> re, |
| Handle<String> pattern, |
| JSRegExp::Flags flags, |
| Handle<String> match_pattern) { |
| re->GetIsolate()->factory()->SetRegExpAtomData(re, |
| JSRegExp::ATOM, |
| pattern, |
| flags, |
| match_pattern); |
| } |
| |
| static void SetAtomLastCapture(Handle<RegExpMatchInfo> last_match_info, |
| String* subject, int from, int to) { |
| SealHandleScope shs(last_match_info->GetIsolate()); |
| last_match_info->SetNumberOfCaptureRegisters(2); |
| last_match_info->SetLastSubject(subject); |
| last_match_info->SetLastInput(subject); |
| last_match_info->SetCapture(0, from); |
| last_match_info->SetCapture(1, to); |
| } |
| |
| |
| int RegExpImpl::AtomExecRaw(Handle<JSRegExp> regexp, |
| Handle<String> subject, |
| int index, |
| int32_t* output, |
| int output_size) { |
| Isolate* isolate = regexp->GetIsolate(); |
| |
| DCHECK_LE(0, index); |
| DCHECK_LE(index, subject->length()); |
| |
| subject = String::Flatten(subject); |
| DisallowHeapAllocation no_gc; // ensure vectors stay valid |
| |
| String* needle = String::cast(regexp->DataAt(JSRegExp::kAtomPatternIndex)); |
| int needle_len = needle->length(); |
| DCHECK(needle->IsFlat()); |
| DCHECK_LT(0, needle_len); |
| |
| if (index + needle_len > subject->length()) { |
| return RegExpImpl::RE_FAILURE; |
| } |
| |
| for (int i = 0; i < output_size; i += 2) { |
| String::FlatContent needle_content = needle->GetFlatContent(); |
| String::FlatContent subject_content = subject->GetFlatContent(); |
| DCHECK(needle_content.IsFlat()); |
| DCHECK(subject_content.IsFlat()); |
| // dispatch on type of strings |
| index = |
| (needle_content.IsOneByte() |
| ? (subject_content.IsOneByte() |
| ? SearchString(isolate, subject_content.ToOneByteVector(), |
| needle_content.ToOneByteVector(), index) |
| : SearchString(isolate, subject_content.ToUC16Vector(), |
| needle_content.ToOneByteVector(), index)) |
| : (subject_content.IsOneByte() |
| ? SearchString(isolate, subject_content.ToOneByteVector(), |
| needle_content.ToUC16Vector(), index) |
| : SearchString(isolate, subject_content.ToUC16Vector(), |
| needle_content.ToUC16Vector(), index))); |
| if (index == -1) { |
| return i / 2; // Return number of matches. |
| } else { |
| output[i] = index; |
| output[i+1] = index + needle_len; |
| index += needle_len; |
| } |
| } |
| return output_size / 2; |
| } |
| |
| Handle<Object> RegExpImpl::AtomExec(Handle<JSRegExp> re, Handle<String> subject, |
| int index, |
| Handle<RegExpMatchInfo> last_match_info) { |
| Isolate* isolate = re->GetIsolate(); |
| |
| static const int kNumRegisters = 2; |
| STATIC_ASSERT(kNumRegisters <= Isolate::kJSRegexpStaticOffsetsVectorSize); |
| int32_t* output_registers = isolate->jsregexp_static_offsets_vector(); |
| |
| int res = AtomExecRaw(re, subject, index, output_registers, kNumRegisters); |
| |
| if (res == RegExpImpl::RE_FAILURE) return isolate->factory()->null_value(); |
| |
| DCHECK_EQ(res, RegExpImpl::RE_SUCCESS); |
| SealHandleScope shs(isolate); |
| SetAtomLastCapture(last_match_info, *subject, output_registers[0], |
| output_registers[1]); |
| return last_match_info; |
| } |
| |
| |
| // Irregexp implementation. |
| |
| // Ensures that the regexp object contains a compiled version of the |
| // source for either one-byte or two-byte subject strings. |
| // If the compiled version doesn't already exist, it is compiled |
| // from the source pattern. |
| // If compilation fails, an exception is thrown and this function |
| // returns false. |
| bool RegExpImpl::EnsureCompiledIrregexp(Handle<JSRegExp> re, |
| Handle<String> sample_subject, |
| bool is_one_byte) { |
| Object* compiled_code = re->DataAt(JSRegExp::code_index(is_one_byte)); |
| #ifdef V8_INTERPRETED_REGEXP |
| if (compiled_code->IsByteArray()) return true; |
| #else // V8_INTERPRETED_REGEXP (RegExp native code) |
| if (compiled_code->IsCode()) return true; |
| #endif |
| return CompileIrregexp(re, sample_subject, is_one_byte); |
| } |
| |
| |
| bool RegExpImpl::CompileIrregexp(Handle<JSRegExp> re, |
| Handle<String> sample_subject, |
| bool is_one_byte) { |
| // Compile the RegExp. |
| Isolate* isolate = re->GetIsolate(); |
| Zone zone(isolate->allocator(), ZONE_NAME); |
| PostponeInterruptsScope postpone(isolate); |
| #ifdef DEBUG |
| Object* entry = re->DataAt(JSRegExp::code_index(is_one_byte)); |
| // When arriving here entry can only be a smi representing an uncompiled |
| // regexp. |
| DCHECK(entry->IsSmi()); |
| int entry_value = Smi::ToInt(entry); |
| DCHECK_EQ(JSRegExp::kUninitializedValue, entry_value); |
| #endif |
| |
| JSRegExp::Flags flags = re->GetFlags(); |
| |
| Handle<String> pattern(re->Pattern()); |
| pattern = String::Flatten(pattern); |
| RegExpCompileData compile_data; |
| FlatStringReader reader(isolate, pattern); |
| if (!RegExpParser::ParseRegExp(isolate, &zone, &reader, flags, |
| &compile_data)) { |
| // Throw an exception if we fail to parse the pattern. |
| // THIS SHOULD NOT HAPPEN. We already pre-parsed it successfully once. |
| USE(ThrowRegExpException(re, pattern, compile_data.error)); |
| return false; |
| } |
| RegExpEngine::CompilationResult result = |
| RegExpEngine::Compile(isolate, &zone, &compile_data, flags, pattern, |
| sample_subject, is_one_byte); |
| if (result.error_message != nullptr) { |
| // Unable to compile regexp. |
| if (FLAG_abort_on_stack_or_string_length_overflow && |
| strncmp(result.error_message, "Stack overflow", 15) == 0) { |
| FATAL("Aborting on stack overflow"); |
| } |
| Handle<String> error_message = isolate->factory()->NewStringFromUtf8( |
| CStrVector(result.error_message)).ToHandleChecked(); |
| ThrowRegExpException(re, error_message); |
| return false; |
| } |
| |
| Handle<FixedArray> data = Handle<FixedArray>(FixedArray::cast(re->data())); |
| data->set(JSRegExp::code_index(is_one_byte), result.code); |
| SetIrregexpCaptureNameMap(*data, compile_data.capture_name_map); |
| int register_max = IrregexpMaxRegisterCount(*data); |
| if (result.num_registers > register_max) { |
| SetIrregexpMaxRegisterCount(*data, result.num_registers); |
| } |
| |
| return true; |
| } |
| |
| |
| int RegExpImpl::IrregexpMaxRegisterCount(FixedArray* re) { |
| return Smi::cast( |
| re->get(JSRegExp::kIrregexpMaxRegisterCountIndex))->value(); |
| } |
| |
| |
| void RegExpImpl::SetIrregexpMaxRegisterCount(FixedArray* re, int value) { |
| re->set(JSRegExp::kIrregexpMaxRegisterCountIndex, Smi::FromInt(value)); |
| } |
| |
| void RegExpImpl::SetIrregexpCaptureNameMap(FixedArray* re, |
| Handle<FixedArray> value) { |
| if (value.is_null()) { |
| re->set(JSRegExp::kIrregexpCaptureNameMapIndex, Smi::kZero); |
| } else { |
| re->set(JSRegExp::kIrregexpCaptureNameMapIndex, *value); |
| } |
| } |
| |
| int RegExpImpl::IrregexpNumberOfCaptures(FixedArray* re) { |
| return Smi::ToInt(re->get(JSRegExp::kIrregexpCaptureCountIndex)); |
| } |
| |
| |
| int RegExpImpl::IrregexpNumberOfRegisters(FixedArray* re) { |
| return Smi::ToInt(re->get(JSRegExp::kIrregexpMaxRegisterCountIndex)); |
| } |
| |
| |
| ByteArray* RegExpImpl::IrregexpByteCode(FixedArray* re, bool is_one_byte) { |
| return ByteArray::cast(re->get(JSRegExp::code_index(is_one_byte))); |
| } |
| |
| |
| Code* RegExpImpl::IrregexpNativeCode(FixedArray* re, bool is_one_byte) { |
| return Code::cast(re->get(JSRegExp::code_index(is_one_byte))); |
| } |
| |
| |
| void RegExpImpl::IrregexpInitialize(Handle<JSRegExp> re, |
| Handle<String> pattern, |
| JSRegExp::Flags flags, |
| int capture_count) { |
| // Initialize compiled code entries to null. |
| re->GetIsolate()->factory()->SetRegExpIrregexpData(re, |
| JSRegExp::IRREGEXP, |
| pattern, |
| flags, |
| capture_count); |
| } |
| |
| |
| int RegExpImpl::IrregexpPrepare(Handle<JSRegExp> regexp, |
| Handle<String> subject) { |
| DCHECK(subject->IsFlat()); |
| |
| // Check representation of the underlying storage. |
| bool is_one_byte = subject->IsOneByteRepresentationUnderneath(); |
| if (!EnsureCompiledIrregexp(regexp, subject, is_one_byte)) return -1; |
| |
| #ifdef V8_INTERPRETED_REGEXP |
| // Byte-code regexp needs space allocated for all its registers. |
| // The result captures are copied to the start of the registers array |
| // if the match succeeds. This way those registers are not clobbered |
| // when we set the last match info from the last successful match. |
| return IrregexpNumberOfRegisters(FixedArray::cast(regexp->data())) + |
| (IrregexpNumberOfCaptures(FixedArray::cast(regexp->data())) + 1) * 2; |
| #else // V8_INTERPRETED_REGEXP |
| // Native regexp only needs room to output captures. Registers are handled |
| // internally. |
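| // E.g. a pattern with two capture groups needs (2 + 1) * 2 = 6 output |
| // registers: start and end of the whole match plus start and end of each |
| // capture. |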
| return (IrregexpNumberOfCaptures(FixedArray::cast(regexp->data())) + 1) * 2; |
| #endif // V8_INTERPRETED_REGEXP |
| } |
| |
| |
| int RegExpImpl::IrregexpExecRaw(Handle<JSRegExp> regexp, |
| Handle<String> subject, |
| int index, |
| int32_t* output, |
| int output_size) { |
| Isolate* isolate = regexp->GetIsolate(); |
| |
| Handle<FixedArray> irregexp(FixedArray::cast(regexp->data()), isolate); |
| |
| DCHECK_LE(0, index); |
| DCHECK_LE(index, subject->length()); |
| DCHECK(subject->IsFlat()); |
| |
| bool is_one_byte = subject->IsOneByteRepresentationUnderneath(); |
| |
| #ifndef V8_INTERPRETED_REGEXP |
| DCHECK(output_size >= (IrregexpNumberOfCaptures(*irregexp) + 1) * 2); |
| do { |
| EnsureCompiledIrregexp(regexp, subject, is_one_byte); |
| Handle<Code> code(IrregexpNativeCode(*irregexp, is_one_byte), isolate); |
| // The stack is used to allocate registers for the compiled regexp code. |
| // This means that in case of failure, the output registers array is left |
| // untouched and contains the capture results from the previous successful |
| // match. We can use that to set the last match info lazily. |
| NativeRegExpMacroAssembler::Result res = |
| NativeRegExpMacroAssembler::Match(code, |
| subject, |
| output, |
| output_size, |
| index, |
| isolate); |
| if (res != NativeRegExpMacroAssembler::RETRY) { |
| DCHECK(res != NativeRegExpMacroAssembler::EXCEPTION || |
| isolate->has_pending_exception()); |
| STATIC_ASSERT( |
| static_cast<int>(NativeRegExpMacroAssembler::SUCCESS) == RE_SUCCESS); |
| STATIC_ASSERT( |
| static_cast<int>(NativeRegExpMacroAssembler::FAILURE) == RE_FAILURE); |
| STATIC_ASSERT(static_cast<int>(NativeRegExpMacroAssembler::EXCEPTION) |
| == RE_EXCEPTION); |
| return static_cast<IrregexpResult>(res); |
| } |
| // If result is RETRY, the string has changed representation, and we |
| // must restart from scratch. |
| // In this case, we must make sure we are prepared to handle the |
| // potentially different subject (the string can switch between |
| // being internal and external, and even between being Latin1 and UC16, |
| // but the characters are always the same). |
| IrregexpPrepare(regexp, subject); |
| is_one_byte = subject->IsOneByteRepresentationUnderneath(); |
| } while (true); |
| UNREACHABLE(); |
| #else // V8_INTERPRETED_REGEXP |
| |
| DCHECK(output_size >= IrregexpNumberOfRegisters(*irregexp)); |
| // We must have done EnsureCompiledIrregexp, so we can get the number of |
| // registers. |
| int number_of_capture_registers = |
| (IrregexpNumberOfCaptures(*irregexp) + 1) * 2; |
| int32_t* raw_output = &output[number_of_capture_registers]; |
| // We do not touch the actual capture result registers until we know there |
| // has been a match so that we can use those capture results to set the |
| // last match info. |
| for (int i = number_of_capture_registers - 1; i >= 0; i--) { |
| raw_output[i] = -1; |
| } |
| Handle<ByteArray> byte_codes(IrregexpByteCode(*irregexp, is_one_byte), |
| isolate); |
| |
| IrregexpResult result = IrregexpInterpreter::Match(isolate, |
| byte_codes, |
| subject, |
| raw_output, |
| index); |
| if (result == RE_SUCCESS) { |
| // Copy capture results to the start of the registers array. |
| MemCopy(output, raw_output, number_of_capture_registers * sizeof(int32_t)); |
| } |
| if (result == RE_EXCEPTION) { |
| DCHECK(!isolate->has_pending_exception()); |
| isolate->StackOverflow(); |
| } |
| return result; |
| #endif // V8_INTERPRETED_REGEXP |
| } |
| |
| MaybeHandle<Object> RegExpImpl::IrregexpExec( |
| Handle<JSRegExp> regexp, Handle<String> subject, int previous_index, |
| Handle<RegExpMatchInfo> last_match_info) { |
| Isolate* isolate = regexp->GetIsolate(); |
| DCHECK_EQ(regexp->TypeTag(), JSRegExp::IRREGEXP); |
| |
| subject = String::Flatten(subject); |
| |
| // Prepare space for the return values. |
| #if defined(V8_INTERPRETED_REGEXP) && defined(DEBUG) |
| if (FLAG_trace_regexp_bytecodes) { |
| String* pattern = regexp->Pattern(); |
| PrintF("\n\nRegexp match: /%s/\n\n", pattern->ToCString().get()); |
| PrintF("\n\nSubject string: '%s'\n\n", subject->ToCString().get()); |
| } |
| #endif |
| int required_registers = RegExpImpl::IrregexpPrepare(regexp, subject); |
| if (required_registers < 0) { |
| // Compiling failed with an exception. |
| DCHECK(isolate->has_pending_exception()); |
| return MaybeHandle<Object>(); |
| } |
| |
| int32_t* output_registers = nullptr; |
| if (required_registers > Isolate::kJSRegexpStaticOffsetsVectorSize) { |
| output_registers = NewArray<int32_t>(required_registers); |
| } |
| std::unique_ptr<int32_t[]> auto_release(output_registers); |
| if (output_registers == nullptr) { |
| output_registers = isolate->jsregexp_static_offsets_vector(); |
| } |
| |
| int res = RegExpImpl::IrregexpExecRaw( |
| regexp, subject, previous_index, output_registers, required_registers); |
| if (res == RE_SUCCESS) { |
| int capture_count = |
| IrregexpNumberOfCaptures(FixedArray::cast(regexp->data())); |
| return SetLastMatchInfo( |
| last_match_info, subject, capture_count, output_registers); |
| } |
| if (res == RE_EXCEPTION) { |
| DCHECK(isolate->has_pending_exception()); |
| return MaybeHandle<Object>(); |
| } |
| DCHECK(res == RE_FAILURE); |
| return isolate->factory()->null_value(); |
| } |
| |
| Handle<RegExpMatchInfo> RegExpImpl::SetLastMatchInfo( |
| Handle<RegExpMatchInfo> last_match_info, Handle<String> subject, |
| int capture_count, int32_t* match) { |
| // This is the only place where match infos can grow. If, after executing the |
| // regexp, RegExpExecStub finds that the match info is too small, it restarts |
| // execution in RegExpImpl::Exec, which finally grows the match info right |
| // here. |
| |
| int capture_register_count = (capture_count + 1) * 2; |
| Handle<RegExpMatchInfo> result = |
| RegExpMatchInfo::ReserveCaptures(last_match_info, capture_register_count); |
| result->SetNumberOfCaptureRegisters(capture_register_count); |
| |
| if (*result != *last_match_info) { |
| // The match info has been reallocated, update the corresponding reference |
| // on the native context. |
| Isolate* isolate = last_match_info->GetIsolate(); |
| if (*last_match_info == *isolate->regexp_last_match_info()) { |
| isolate->native_context()->set_regexp_last_match_info(*result); |
| } else if (*last_match_info == *isolate->regexp_internal_match_info()) { |
| isolate->native_context()->set_regexp_internal_match_info(*result); |
| } |
| } |
| |
| DisallowHeapAllocation no_allocation; |
| if (match != nullptr) { |
| for (int i = 0; i < capture_register_count; i += 2) { |
| result->SetCapture(i, match[i]); |
| result->SetCapture(i + 1, match[i + 1]); |
| } |
| } |
| result->SetLastSubject(*subject); |
| result->SetLastInput(*subject); |
| return result; |
| } |
| |
| RegExpImpl::GlobalCache::GlobalCache(Handle<JSRegExp> regexp, |
| Handle<String> subject, Isolate* isolate) |
| : register_array_(nullptr), |
| register_array_size_(0), |
| regexp_(regexp), |
| subject_(subject) { |
| #ifdef V8_INTERPRETED_REGEXP |
| bool interpreted = true; |
| #else |
| bool interpreted = false; |
| #endif // V8_INTERPRETED_REGEXP |
| |
| if (regexp_->TypeTag() == JSRegExp::ATOM) { |
| static const int kAtomRegistersPerMatch = 2; |
| registers_per_match_ = kAtomRegistersPerMatch; |
| // There is no distinction between interpreted and native for atom regexps. |
| interpreted = false; |
| } else { |
| registers_per_match_ = RegExpImpl::IrregexpPrepare(regexp_, subject_); |
| if (registers_per_match_ < 0) { |
| num_matches_ = -1; // Signal exception. |
| return; |
| } |
| } |
| |
| DCHECK(IsGlobal(regexp->GetFlags())); |
| if (!interpreted) { |
| register_array_size_ = |
| Max(registers_per_match_, Isolate::kJSRegexpStaticOffsetsVectorSize); |
| max_matches_ = register_array_size_ / registers_per_match_; |
| } else { |
| // The global loop is not implemented in the interpreted regexp. We choose |
| // the size of the offsets vector so that it can only store one match. |
| register_array_size_ = registers_per_match_; |
| max_matches_ = 1; |
| } |
| |
| if (register_array_size_ > Isolate::kJSRegexpStaticOffsetsVectorSize) { |
| register_array_ = NewArray<int32_t>(register_array_size_); |
| } else { |
| register_array_ = isolate->jsregexp_static_offsets_vector(); |
| } |
| |
| // Set state so that fetching the results the first time triggers a call |
| // to the compiled regexp. |
| current_match_index_ = max_matches_ - 1; |
| num_matches_ = max_matches_; |
| DCHECK_LE(2, registers_per_match_); // Each match has at least one capture. |
| DCHECK_GE(register_array_size_, registers_per_match_); |
| int32_t* last_match = |
| ®ister_array_[current_match_index_ * registers_per_match_]; |
| last_match[0] = -1; |
| last_match[1] = 0; |
| } |
| |
| int RegExpImpl::GlobalCache::AdvanceZeroLength(int last_index) { |
| if (IsUnicode(regexp_->GetFlags()) && last_index + 1 < subject_->length() && |
| unibrow::Utf16::IsLeadSurrogate(subject_->Get(last_index)) && |
| unibrow::Utf16::IsTrailSurrogate(subject_->Get(last_index + 1))) { |
| // Advance over the surrogate pair. |
| return last_index + 2; |
| } |
| return last_index + 1; |
| } |
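| |
| // E.g. if the zero-length match ended just before the surrogate pair |
| // "\uD83D\uDE00" (U+1F600), the next attempt starts after both code units |
| // rather than between them. |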
| |
| // ------------------------------------------------------------------- |
| // Implementation of the Irregexp regular expression engine. |
| // |
| // The Irregexp regular expression engine is intended to be a complete |
| // implementation of ECMAScript regular expressions. It generates either |
| // bytecodes or native code. |
| |
| // The Irregexp regexp engine is structured in three steps. |
| // 1) The parser generates an abstract syntax tree. See ast.cc. |
| // 2) From the AST a node network is created. The nodes are all |
| // subclasses of RegExpNode. The nodes represent states when |
| // executing a regular expression. Several optimizations are |
| // performed on the node network. |
| // 3) From the nodes we generate either byte codes or native code |
| // that can actually execute the regular expression (perform |
| // the search). The code generation step is described in more |
| // detail below. |
| |
| // Code generation. |
| // |
| // The nodes are divided into four main categories. |
| // * Choice nodes |
| // These represent places where the regular expression can |
| // match in more than one way. For example on entry to an |
| // alternation (foo|bar) or a repetition (*, +, ? or {}). |
| // * Action nodes |
| // These represent places where some action should be |
| // performed. Examples include recording the current position |
| // in the input string to a register (in order to implement |
| // captures) or other actions on registers, for example in order |
| // to implement the counters needed for {} repetitions. |
| // * Matching nodes |
| // These attempt to match some element of the input string. |
| // Examples of elements include character classes, plain strings |
| // or back references. |
| // * End nodes |
| // These are used to implement the actions required on finding |
| // a successful match or failing to find a match. |
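| // |
| // As a rough illustration (not the literal generated structure), a pattern |
| // like /(ab|cd)e/ becomes: an action node recording the capture start, a |
| // choice node whose two alternatives are matching nodes for "ab" and "cd", |
| // an action node recording the capture end, a matching node for "e", and |
| // an end node that reports success. |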
| // |
| // The code generated (whether as byte codes or native code) maintains |
| // some state as it runs. This consists of the following elements: |
| // |
| // * The capture registers. Used for string captures. |
| // * Other registers. Used for counters etc. |
| // * The current position. |
| // * The stack of backtracking information. Used when a matching node |
| // fails to find a match and needs to try an alternative. |
| // |
| // Conceptual regular expression execution model: |
| // |
| // There is a simple conceptual model of regular expression execution |
| // which will be presented first. The actual code generated is a more |
| // efficient simulation of the simple conceptual model: |
| // |
| // * Choice nodes are implemented as follows: |
| // For each choice except the last { |
| // push current position |
| // push backtrack code location |
| // <generate code to test for choice> |
| // backtrack code location: |
| // pop current position |
| // } |
| // <generate code to test for last choice> |
| // |
| // * Action nodes are generated as follows: |
| // <push affected registers on backtrack stack> |
| // <generate code to perform action> |
| // push backtrack code location |
| // <generate code to test for following nodes> |
| // backtrack code location: |
| // <pop affected registers to restore their state> |
| // <pop backtrack location from stack and go to it> |
| // |
| // * Matching nodes are generated as follows: |
| // if input string matches at current position |
| // update current position |
| // <generate code to test for following nodes> |
| // else |
| // <pop backtrack location from stack and go to it> |
| // |
| // Thus it can be seen that the current position is saved and restored |
| // by the choice nodes, whereas the registers are saved and restored by |
| // the action nodes that manipulate them. |
| // |
| // The other interesting aspect of this model is that nodes are generated |
| // at the point where they are needed by a recursive call to Emit(). If |
| // the node has already been code generated then the Emit() call will |
| // generate a jump to the previously generated code instead. In order to |
| // limit recursion it is possible for the Emit() function to put the node |
| // on a work list for later generation and instead generate a jump. The |
| // destination of the jump is resolved later when the code is generated. |
| // |
| // Actual regular expression code generation. |
| // |
| // Code generation is actually more complicated than the above. In order |
| // to improve the efficiency of the generated code some optimizations are |
| // performed: |
| // |
| // * Choice nodes have 1-character lookahead. |
| // A choice node looks at the following character and eliminates some of |
| // the choices immediately based on that character. This is not yet |
| // implemented. |
| // * Simple greedy loops store reduced backtracking information. |
| // A quantifier like /.*foo/m will greedily match the whole input. It will |
| // then need to backtrack to a point where it can match "foo". The naive |
| // implementation of this would push each character position onto the |
| // backtracking stack, then pop them off one by one. This would use space |
| // proportional to the length of the input string. However since the "." |
| // can only match in one way and always has a constant length (in this case |
| // of 1) it suffices to store the current position on the top of the stack |
| // once. Matching now becomes merely incrementing the current position and |
| // backtracking becomes decrementing the current position and checking the |
| // result against the stored current position. This is faster and saves |
| // space. |
| // * The current state is virtualized. |
| // This is used to defer expensive operations until it is clear that they |
| // are needed and to generate code for a node more than once, allowing |
| // specialized and efficient versions of the code to be created. This is |
| // explained in the section below. |
| // |
| // Execution state virtualization. |
| // |
| // Instead of emitting code, nodes that manipulate the state can record their |
| // manipulation in an object called the Trace. The Trace object can record a |
| // current position offset, an optional backtrack code location on the top of |
| // the virtualized backtrack stack and some register changes. When a node is |
| // to be emitted it can flush the Trace or update it. Flushing the Trace |
| // will emit code to bring the actual state into line with the virtual state. |
| // Avoiding flushing the state can postpone some work (e.g. updates of capture |
| // registers). Postponing work can save time when executing the regular |
| // expression, since the work may turn out never to be needed because the |
| // match can fail first. In addition it is much faster to jump to a |
| // known backtrack code location than it is to pop an unknown backtrack |
| // location from the stack and jump there. |
| // |
| // The virtual state found in the Trace affects code generation. For example |
| // the virtual state contains the difference between the actual current |
| // position and the virtual current position, and matching code needs to use |
| // this offset to attempt a match in the correct location of the input |
| // string. Therefore code generated for a non-trivial trace is specialized |
| // to that trace. The code generator therefore has the ability to generate |
| // code for each node several times. In order to limit the size of the |
| // generated code there is an arbitrary limit on how many specialized sets of |
| // code may be generated for a given node. If the limit is reached, the |
| // trace is flushed and a generic version of the code for a node is emitted. |
| // This is subsequently used for that node. The code emitted for a |
| // non-generic trace is not recorded in the node and so it cannot currently |
| // be reused in the event that code generation is requested for an |
| // identical trace. |
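| // |
| // As a small illustration: when a matching node is emitted with a trace |
| // whose current-position offset is 2 and which holds a deferred capture |
| // store, the node tests the character two positions ahead of the actual |
| // current position without advancing it or writing the register; only when |
| // the trace is flushed (for instance at a loop choice node) is the position |
| // advanced and the register actually written. |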
| |
| |
| void RegExpTree::AppendToText(RegExpText* text, Zone* zone) { |
| UNREACHABLE(); |
| } |
| |
| |
| void RegExpAtom::AppendToText(RegExpText* text, Zone* zone) { |
| text->AddElement(TextElement::Atom(this), zone); |
| } |
| |
| |
| void RegExpCharacterClass::AppendToText(RegExpText* text, Zone* zone) { |
| text->AddElement(TextElement::CharClass(this), zone); |
| } |
| |
| |
| void RegExpText::AppendToText(RegExpText* text, Zone* zone) { |
| for (int i = 0; i < elements()->length(); i++) |
| text->AddElement(elements()->at(i), zone); |
| } |
| |
| |
| TextElement TextElement::Atom(RegExpAtom* atom) { |
| return TextElement(ATOM, atom); |
| } |
| |
| |
| TextElement TextElement::CharClass(RegExpCharacterClass* char_class) { |
| return TextElement(CHAR_CLASS, char_class); |
| } |
| |
| |
| int TextElement::length() const { |
| switch (text_type()) { |
| case ATOM: |
| return atom()->length(); |
| |
| case CHAR_CLASS: |
| return 1; |
| } |
| UNREACHABLE(); |
| } |
| |
| |
| DispatchTable* ChoiceNode::GetTable(bool ignore_case) { |
| if (table_ == nullptr) { |
| table_ = new(zone()) DispatchTable(zone()); |
| DispatchTableConstructor cons(table_, ignore_case, zone()); |
| cons.BuildTable(this); |
| } |
| return table_; |
| } |
| |
| |
| class FrequencyCollator { |
| public: |
| FrequencyCollator() : total_samples_(0) { |
| for (int i = 0; i < RegExpMacroAssembler::kTableSize; i++) { |
| frequencies_[i] = CharacterFrequency(i); |
| } |
| } |
| |
| void CountCharacter(int character) { |
| int index = (character & RegExpMacroAssembler::kTableMask); |
| frequencies_[index].Increment(); |
| total_samples_++; |
| } |
| |
| // Does not measure in percent, but rather per-128 (the table size from the |
| // regexp macro assembler). |
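| // E.g. a character that accounted for 10 of 40 samples has a frequency of |
| // 10 * 128 / 40 = 32. |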
| int Frequency(int in_character) { |
| DCHECK((in_character & RegExpMacroAssembler::kTableMask) == in_character); |
| if (total_samples_ < 1) return 1; // Avoid division by zero. |
| int freq_in_per128 = |
| (frequencies_[in_character].counter() * 128) / total_samples_; |
| return freq_in_per128; |
| } |
| |
| private: |
| class CharacterFrequency { |
| public: |
| CharacterFrequency() : counter_(0), character_(-1) { } |
| explicit CharacterFrequency(int character) |
| : counter_(0), character_(character) { } |
| |
| void Increment() { counter_++; } |
| int counter() { return counter_; } |
| int character() { return character_; } |
| |
| private: |
| int counter_; |
| int character_; |
| }; |
| |
| |
| private: |
| CharacterFrequency frequencies_[RegExpMacroAssembler::kTableSize]; |
| int total_samples_; |
| }; |
| |
| |
| class RegExpCompiler { |
| public: |
| RegExpCompiler(Isolate* isolate, Zone* zone, int capture_count, |
| bool is_one_byte); |
| |
| int AllocateRegister() { |
| if (next_register_ >= RegExpMacroAssembler::kMaxRegister) { |
| reg_exp_too_big_ = true; |
| return next_register_; |
| } |
| return next_register_++; |
| } |
| |
| // Lookarounds to match lone surrogates for unicode character class matches |
| // are never nested. We can therefore reuse registers. |
| int UnicodeLookaroundStackRegister() { |
| if (unicode_lookaround_stack_register_ == kNoRegister) { |
| unicode_lookaround_stack_register_ = AllocateRegister(); |
| } |
| return unicode_lookaround_stack_register_; |
| } |
| |
| int UnicodeLookaroundPositionRegister() { |
| if (unicode_lookaround_position_register_ == kNoRegister) { |
| unicode_lookaround_position_register_ = AllocateRegister(); |
| } |
| return unicode_lookaround_position_register_; |
| } |
| |
| RegExpEngine::CompilationResult Assemble(RegExpMacroAssembler* assembler, |
| RegExpNode* start, |
| int capture_count, |
| Handle<String> pattern); |
| |
| inline void AddWork(RegExpNode* node) { |
| if (!node->on_work_list() && !node->label()->is_bound()) { |
| node->set_on_work_list(true); |
| work_list_->push_back(node); |
| } |
| } |
| |
| static const int kImplementationOffset = 0; |
| static const int kNumberOfRegistersOffset = 0; |
| static const int kCodeOffset = 1; |
| |
| RegExpMacroAssembler* macro_assembler() { return macro_assembler_; } |
| EndNode* accept() { return accept_; } |
| |
| static const int kMaxRecursion = 100; |
| inline int recursion_depth() { return recursion_depth_; } |
| inline void IncrementRecursionDepth() { recursion_depth_++; } |
| inline void DecrementRecursionDepth() { recursion_depth_--; } |
| |
| void SetRegExpTooBig() { reg_exp_too_big_ = true; } |
| |
| inline bool one_byte() { return one_byte_; } |
| inline bool optimize() { return optimize_; } |
| inline void set_optimize(bool value) { optimize_ = value; } |
| inline bool limiting_recursion() { return limiting_recursion_; } |
| inline void set_limiting_recursion(bool value) { |
| limiting_recursion_ = value; |
| } |
| bool read_backward() { return read_backward_; } |
| void set_read_backward(bool value) { read_backward_ = value; } |
| FrequencyCollator* frequency_collator() { return &frequency_collator_; } |
| |
| int current_expansion_factor() { return current_expansion_factor_; } |
| void set_current_expansion_factor(int value) { |
| current_expansion_factor_ = value; |
| } |
| |
| Isolate* isolate() const { return isolate_; } |
| Zone* zone() const { return zone_; } |
| |
| static const int kNoRegister = -1; |
| |
| private: |
| EndNode* accept_; |
| int next_register_; |
| int unicode_lookaround_stack_register_; |
| int unicode_lookaround_position_register_; |
| std::vector<RegExpNode*>* work_list_; |
| int recursion_depth_; |
| RegExpMacroAssembler* macro_assembler_; |
| bool one_byte_; |
| bool reg_exp_too_big_; |
| bool limiting_recursion_; |
| bool optimize_; |
| bool read_backward_; |
| int current_expansion_factor_; |
| FrequencyCollator frequency_collator_; |
| Isolate* isolate_; |
| Zone* zone_; |
| }; |
| |
| |
| class RecursionCheck { |
| public: |
| explicit RecursionCheck(RegExpCompiler* compiler) : compiler_(compiler) { |
| compiler->IncrementRecursionDepth(); |
| } |
| ~RecursionCheck() { compiler_->DecrementRecursionDepth(); } |
| private: |
| RegExpCompiler* compiler_; |
| }; |
| |
| |
| static RegExpEngine::CompilationResult IrregexpRegExpTooBig(Isolate* isolate) { |
| return RegExpEngine::CompilationResult(isolate, "RegExp too big"); |
| } |
| |
| |
| // Attempts to compile the regexp using an Irregexp code generator. Returns |
| // a fixed array or a null handle depending on whether it succeeded. |
| RegExpCompiler::RegExpCompiler(Isolate* isolate, Zone* zone, int capture_count, |
| bool one_byte) |
| : next_register_(2 * (capture_count + 1)), |
| unicode_lookaround_stack_register_(kNoRegister), |
| unicode_lookaround_position_register_(kNoRegister), |
| work_list_(nullptr), |
| recursion_depth_(0), |
| one_byte_(one_byte), |
| reg_exp_too_big_(false), |
| limiting_recursion_(false), |
| optimize_(FLAG_regexp_optimization), |
| read_backward_(false), |
| current_expansion_factor_(1), |
| frequency_collator_(), |
| isolate_(isolate), |
| zone_(zone) { |
| accept_ = new(zone) EndNode(EndNode::ACCEPT, zone); |
| DCHECK_GE(RegExpMacroAssembler::kMaxRegister, next_register_ - 1); |
| } |
| |
| |
| RegExpEngine::CompilationResult RegExpCompiler::Assemble( |
| RegExpMacroAssembler* macro_assembler, |
| RegExpNode* start, |
| int capture_count, |
| Handle<String> pattern) { |
| Isolate* isolate = pattern->GetHeap()->isolate(); |
| |
| #ifdef DEBUG |
| if (FLAG_trace_regexp_assembler) |
| macro_assembler_ = new RegExpMacroAssemblerTracer(isolate, macro_assembler); |
| else |
| #endif |
| macro_assembler_ = macro_assembler; |
| |
| std::vector<RegExpNode*> work_list; |
| work_list_ = &work_list; |
| Label fail; |
| macro_assembler_->PushBacktrack(&fail); |
| Trace new_trace; |
| start->Emit(this, &new_trace); |
| macro_assembler_->Bind(&fail); |
| macro_assembler_->Fail(); |
| while (!work_list.empty()) { |
| RegExpNode* node = work_list.back(); |
| work_list.pop_back(); |
| node->set_on_work_list(false); |
| if (!node->label()->is_bound()) node->Emit(this, &new_trace); |
| } |
| if (reg_exp_too_big_) { |
| macro_assembler_->AbortedCodeGeneration(); |
| return IrregexpRegExpTooBig(isolate_); |
| } |
| |
| Handle<HeapObject> code = macro_assembler_->GetCode(pattern); |
| isolate->IncreaseTotalRegexpCodeGenerated(code->Size()); |
| work_list_ = nullptr; |
| #if defined(ENABLE_DISASSEMBLER) && !defined(V8_INTERPRETED_REGEXP) |
| if (FLAG_print_code) { |
| CodeTracer::Scope trace_scope(isolate->GetCodeTracer()); |
| OFStream os(trace_scope.file()); |
| Handle<Code>::cast(code)->Disassemble(pattern->ToCString().get(), os); |
| } |
| #endif |
| #ifdef DEBUG |
| if (FLAG_trace_regexp_assembler) { |
| delete macro_assembler_; |
| } |
| #endif |
| return RegExpEngine::CompilationResult(*code, next_register_); |
| } |
| |
| |
| bool Trace::DeferredAction::Mentions(int that) { |
| if (action_type() == ActionNode::CLEAR_CAPTURES) { |
| Interval range = static_cast<DeferredClearCaptures*>(this)->range(); |
| return range.Contains(that); |
| } else { |
| return reg() == that; |
| } |
| } |
| |
| |
| bool Trace::mentions_reg(int reg) { |
| for (DeferredAction* action = actions_; action != nullptr; |
| action = action->next()) { |
| if (action->Mentions(reg)) |
| return true; |
| } |
| return false; |
| } |
| |
| |
| bool Trace::GetStoredPosition(int reg, int* cp_offset) { |
| DCHECK_EQ(0, *cp_offset); |
| for (DeferredAction* action = actions_; action != nullptr; |
| action = action->next()) { |
| if (action->Mentions(reg)) { |
| if (action->action_type() == ActionNode::STORE_POSITION) { |
| *cp_offset = static_cast<DeferredCapture*>(action)->cp_offset(); |
| return true; |
| } else { |
| return false; |
| } |
| } |
| } |
| return false; |
| } |
| |
| |
| int Trace::FindAffectedRegisters(OutSet* affected_registers, |
| Zone* zone) { |
| int max_register = RegExpCompiler::kNoRegister; |
| for (DeferredAction* action = actions_; action != nullptr; |
| action = action->next()) { |
| if (action->action_type() == ActionNode::CLEAR_CAPTURES) { |
| Interval range = static_cast<DeferredClearCaptures*>(action)->range(); |
| for (int i = range.from(); i <= range.to(); i++) |
| affected_registers->Set(i, zone); |
| if (range.to() > max_register) max_register = range.to(); |
| } else { |
| affected_registers->Set(action->reg(), zone); |
| if (action->reg() > max_register) max_register = action->reg(); |
| } |
| } |
| return max_register; |
| } |
| |
| |
| void Trace::RestoreAffectedRegisters(RegExpMacroAssembler* assembler, |
| int max_register, |
| const OutSet& registers_to_pop, |
| const OutSet& registers_to_clear) { |
| for (int reg = max_register; reg >= 0; reg--) { |
| if (registers_to_pop.Get(reg)) { |
| assembler->PopRegister(reg); |
| } else if (registers_to_clear.Get(reg)) { |
| int clear_to = reg; |
| while (reg > 0 && registers_to_clear.Get(reg - 1)) { |
| reg--; |
| } |
| assembler->ClearRegisters(reg, clear_to); |
| } |
| } |
| } |
| |
| |
| void Trace::PerformDeferredActions(RegExpMacroAssembler* assembler, |
| int max_register, |
| const OutSet& affected_registers, |
| OutSet* registers_to_pop, |
| OutSet* registers_to_clear, |
| Zone* zone) { |
| // The "+1" is to avoid a push_limit of zero if stack_limit_slack() is 1. |
| const int push_limit = (assembler->stack_limit_slack() + 1) / 2; |
| |
| // Count pushes performed to force a stack limit check occasionally. |
| int pushes = 0; |
| |
| for (int reg = 0; reg <= max_register; reg++) { |
| if (!affected_registers.Get(reg)) { |
| continue; |
| } |
| |
| // The chronologically first deferred action in the trace |
| // is used to infer the action needed to restore a register |
| // to its previous state (or not, if it's safe to ignore it). |
| enum DeferredActionUndoType { IGNORE, RESTORE, CLEAR }; |
| DeferredActionUndoType undo_action = IGNORE; |
| |
| int value = 0; |
| bool absolute = false; |
| bool clear = false; |
| static const int kNoStore = kMinInt; |
| int store_position = kNoStore; |
| // This is a little tricky because we are scanning the actions in reverse |
| // historical order (newest first). |
| for (DeferredAction* action = actions_; action != nullptr; |
| action = action->next()) { |
| if (action->Mentions(reg)) { |
| switch (action->action_type()) { |
| case ActionNode::SET_REGISTER: { |
| Trace::DeferredSetRegister* psr = |
| static_cast<Trace::DeferredSetRegister*>(action); |
| if (!absolute) { |
| value += psr->value(); |
| absolute = true; |
| } |
| // SET_REGISTER is currently only used for newly introduced loop |
| // counters. They can have a significant previous value if they |
| // occur in a loop. TODO(lrn): Propagate this information, so |
| // we can set undo_action to IGNORE if we know there is no value to |
| // restore. |
| undo_action = RESTORE; |
| DCHECK_EQ(store_position, kNoStore); |
| DCHECK(!clear); |
| break; |
| } |
| case ActionNode::INCREMENT_REGISTER: |
| if (!absolute) { |
| value++; |
| } |
| DCHECK_EQ(store_position, kNoStore); |
| DCHECK(!clear); |
| undo_action = RESTORE; |
| break; |
| case ActionNode::STORE_POSITION: { |
| Trace::DeferredCapture* pc = |
| static_cast<Trace::DeferredCapture*>(action); |
| if (!clear && store_position == kNoStore) { |
| store_position = pc->cp_offset(); |
| } |
| |
| // For captures we know that stores and clears alternate. |
| // Other registers are never cleared, and if they occur |
| // inside a loop, they might be assigned more than once. |
| if (reg <= 1) { |
| // Registers zero and one, aka "capture zero", are |
| // always set correctly if we succeed. There is no |
| // need to undo a setting on backtrack, because we |
| // will set it again or fail. |
| undo_action = IGNORE; |
| } else { |
| undo_action = pc->is_capture() ? CLEAR : RESTORE; |
| } |
| DCHECK(!absolute); |
| DCHECK_EQ(value, 0); |
| break; |
| } |
| case ActionNode::CLEAR_CAPTURES: { |
| // Since we're scanning in reverse order, if we've already |
| // set the position we have to ignore historically earlier |
| // clearing operations. |
| if (store_position == kNoStore) { |
| clear = true; |
| } |
| undo_action = RESTORE; |
| DCHECK(!absolute); |
| DCHECK_EQ(value, 0); |
| break; |
| } |
| default: |
| UNREACHABLE(); |
| break; |
| } |
| } |
| } |
| // Prepare for the undo-action (e.g., push if it's going to be popped). |
| if (undo_action == RESTORE) { |
| pushes++; |
| RegExpMacroAssembler::StackCheckFlag stack_check = |
| RegExpMacroAssembler::kNoStackLimitCheck; |
| if (pushes == push_limit) { |
| stack_check = RegExpMacroAssembler::kCheckStackLimit; |
| pushes = 0; |
| } |
| |
| assembler->PushRegister(reg, stack_check); |
| registers_to_pop->Set(reg, zone); |
| } else if (undo_action == CLEAR) { |
| registers_to_clear->Set(reg, zone); |
| } |
| // Perform the chronologically last action (or accumulated increment) |
| // for the register. |
| if (store_position != kNoStore) { |
| assembler->WriteCurrentPositionToRegister(reg, store_position); |
| } else if (clear) { |
| assembler->ClearRegisters(reg, reg); |
| } else if (absolute) { |
| assembler->SetRegister(reg, value); |
| } else if (value != 0) { |
| assembler->AdvanceRegister(reg, value); |
| } |
| } |
| } |
| |
| |
| // This is called as we come into a loop choice node and some other tricky |
| // nodes. It normalizes the state of the code generator to ensure we can |
| // generate generic code. |
| void Trace::Flush(RegExpCompiler* compiler, RegExpNode* successor) { |
| RegExpMacroAssembler* assembler = compiler->macro_assembler(); |
| |
| DCHECK(!is_trivial()); |
| |
| if (actions_ == nullptr && backtrack() == nullptr) { |
| // Here we just have some deferred cp advances to fix and we are back to |
| // a normal situation. We may also have to forget some information gained |
| // through a quick check that was already performed. |
| if (cp_offset_ != 0) assembler->AdvanceCurrentPosition(cp_offset_); |
| // Create a new trivial state and generate the node with that. |
| Trace new_state; |
| successor->Emit(compiler, &new_state); |
| return; |
| } |
| |
| // Generate deferred actions here along with code to undo them again. |
| OutSet affected_registers; |
| |
| if (backtrack() != nullptr) { |
| // Here we have a concrete backtrack location. These are set up by choice |
| // nodes and so they indicate that we have a deferred save of the current |
| // position which we may need to emit here. |
| assembler->PushCurrentPosition(); |
| } |
| |
| int max_register = FindAffectedRegisters(&affected_registers, |
| compiler->zone()); |
| OutSet registers_to_pop; |
| OutSet registers_to_clear; |
| PerformDeferredActions(assembler, |
| max_register, |
| affected_registers, |
| ®isters_to_pop, |
| ®isters_to_clear, |
| compiler->zone()); |
| if (cp_offset_ != 0) { |
| assembler->AdvanceCurrentPosition(cp_offset_); |
| } |
| |
| // Create a new trivial state and generate the node with that. |
| Label undo; |
| assembler->PushBacktrack(&undo); |
| if (successor->KeepRecursing(compiler)) { |
| Trace new_state; |
| successor->Emit(compiler, &new_state); |
| } else { |
| compiler->AddWork(successor); |
| assembler->GoTo(successor->label()); |
| } |
| |
| // On backtrack we need to restore state. |
| assembler->Bind(&undo); |
| RestoreAffectedRegisters(assembler, |
| max_register, |
| registers_to_pop, |
| registers_to_clear); |
| if (backtrack() == nullptr) { |
| assembler->Backtrack(); |
| } else { |
| assembler->PopCurrentPosition(); |
| assembler->GoTo(backtrack()); |
| } |
| } |
| |
| |
| void NegativeSubmatchSuccess::Emit(RegExpCompiler* compiler, Trace* trace) { |
| RegExpMacroAssembler* assembler = compiler->macro_assembler(); |
| |
| // Omit flushing the trace. We discard the entire stack frame anyway. |
| |
| if (!label()->is_bound()) { |
| // We are completely independent of the trace, since we ignore it, |
| // so this code can be used as the generic version. |
| assembler->Bind(label()); |
| } |
| |
| // Throw away everything on the backtrack stack since the start |
| // of the negative submatch and restore the character position. |
| assembler->ReadCurrentPositionFromRegister(current_position_register_); |
| assembler->ReadStackPointerFromRegister(stack_pointer_register_); |
| if (clear_capture_count_ > 0) { |
| // Clear any captures that might have been performed during the success |
| // of the body of the negative look-ahead. |
| int clear_capture_end = clear_capture_start_ + clear_capture_count_ - 1; |
| assembler->ClearRegisters(clear_capture_start_, clear_capture_end); |
| } |
| // Now that we have unwound the stack, we find at the top of the stack the |
| // backtrack that the BeginSubmatch node got. |
| assembler->Backtrack(); |
| } |
| |
| |
| void EndNode::Emit(RegExpCompiler* compiler, Trace* trace) { |
| if (!trace->is_trivial()) { |
| trace->Flush(compiler, this); |
| return; |
| } |
| RegExpMacroAssembler* assembler = compiler->macro_assembler(); |
| if (!label()->is_bound()) { |
| assembler->Bind(label()); |
| } |
| switch (action_) { |
| case ACCEPT: |
| assembler->Succeed(); |
| return; |
| case BACKTRACK: |
| assembler->GoTo(trace->backtrack()); |
| return; |
| case NEGATIVE_SUBMATCH_SUCCESS: |
| // This case is handled in a different virtual method. |
| UNREACHABLE(); |
| } |
| UNIMPLEMENTED(); |
| } |
| |
| |
| void GuardedAlternative::AddGuard(Guard* guard, Zone* zone) { |
| if (guards_ == nullptr) guards_ = new (zone) ZoneList<Guard*>(1, zone); |
| guards_->Add(guard, zone); |
| } |
| |
| |
| ActionNode* ActionNode::SetRegister(int reg, |
| int val, |
| RegExpNode* on_success) { |
| ActionNode* result = |
| new(on_success->zone()) ActionNode(SET_REGISTER, on_success); |
| result->data_.u_store_register.reg = reg; |
| result->data_.u_store_register.value = val; |
| return result; |
| } |
| |
| |
| ActionNode* ActionNode::IncrementRegister(int reg, RegExpNode* on_success) { |
| ActionNode* result = |
| new(on_success->zone()) ActionNode(INCREMENT_REGISTER, on_success); |
| result->data_.u_increment_register.reg = reg; |
| return result; |
| } |
| |
| |
| ActionNode* ActionNode::StorePosition(int reg, |
| bool is_capture, |
| RegExpNode* on_success) { |
| ActionNode* result = |
| new(on_success->zone()) ActionNode(STORE_POSITION, on_success); |
| result->data_.u_position_register.reg = reg; |
| result->data_.u_position_register.is_capture = is_capture; |
| return result; |
| } |
| |
| |
| ActionNode* ActionNode::ClearCaptures(Interval range, |
| RegExpNode* on_success) { |
| ActionNode* result = |
| new(on_success->zone()) ActionNode(CLEAR_CAPTURES, on_success); |
| result->data_.u_clear_captures.range_from = range.from(); |
| result->data_.u_clear_captures.range_to = range.to(); |
| return result; |
| } |
| |
| |
| ActionNode* ActionNode::BeginSubmatch(int stack_reg, |
| int position_reg, |
| RegExpNode* on_success) { |
| ActionNode* result = |
| new(on_success->zone()) ActionNode(BEGIN_SUBMATCH, on_success); |
| result->data_.u_submatch.stack_pointer_register = stack_reg; |
| result->data_.u_submatch.current_position_register = position_reg; |
| return result; |
| } |
| |
| |
| ActionNode* ActionNode::PositiveSubmatchSuccess(int stack_reg, |
| int position_reg, |
| int clear_register_count, |
| int clear_register_from, |
| RegExpNode* on_success) { |
| ActionNode* result = |
| new(on_success->zone()) ActionNode(POSITIVE_SUBMATCH_SUCCESS, on_success); |
| result->data_.u_submatch.stack_pointer_register = stack_reg; |
| result->data_.u_submatch.current_position_register = position_reg; |
| result->data_.u_submatch.clear_register_count = clear_register_count; |
| result->data_.u_submatch.clear_register_from = clear_register_from; |
| return result; |
| } |
| |
| |
| ActionNode* ActionNode::EmptyMatchCheck(int start_register, |
| int repetition_register, |
| int repetition_limit, |
| RegExpNode* on_success) { |
| ActionNode* result = |
| new(on_success->zone()) ActionNode(EMPTY_MATCH_CHECK, on_success); |
| result->data_.u_empty_match_check.start_register = start_register; |
| result->data_.u_empty_match_check.repetition_register = repetition_register; |
| result->data_.u_empty_match_check.repetition_limit = repetition_limit; |
| return result; |
| } |
| |
| |
| #define DEFINE_ACCEPT(Type) \ |
| void Type##Node::Accept(NodeVisitor* visitor) { \ |
| visitor->Visit##Type(this); \ |
| } |
| FOR_EACH_NODE_TYPE(DEFINE_ACCEPT) |
| #undef DEFINE_ACCEPT |
| |
| |
| void LoopChoiceNode::Accept(NodeVisitor* visitor) { |
| visitor->VisitLoopChoice(this); |
| } |
| |
| |
| // ------------------------------------------------------------------- |
| // Emit code. |
| |
| |
| void ChoiceNode::GenerateGuard(RegExpMacroAssembler* macro_assembler, |
| Guard* guard, |
| Trace* trace) { |
| switch (guard->op()) { |
| case Guard::LT: |
| DCHECK(!trace->mentions_reg(guard->reg())); |
| macro_assembler->IfRegisterGE(guard->reg(), |
| guard->value(), |
| trace->backtrack()); |
| break; |
| case Guard::GEQ: |
| DCHECK(!trace->mentions_reg(guard->reg())); |
| macro_assembler->IfRegisterLT(guard->reg(), |
| guard->value(), |
| trace->backtrack()); |
| break; |
| } |
| } |
| |
| |
| // Returns the number of characters in the equivalence class, omitting those |
| // that cannot occur in the source string because it is Latin1. |
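| // E.g. the equivalence class of 'b' is {'B', 'b'}, while a character with |
| // no case, such as '-', yields just the character itself. |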
| static int GetCaseIndependentLetters(Isolate* isolate, uc16 character, |
| bool one_byte_subject, |
| unibrow::uchar* letters) { |
| int length = |
| isolate->jsregexp_uncanonicalize()->get(character, '\0', letters); |
| // Unibrow returns 0 or 1 for characters where case independence is |
| // trivial. |
| if (length == 0) { |
| letters[0] = character; |
| length = 1; |
| } |
| |
| if (one_byte_subject) { |
| int new_length = 0; |
| for (int i = 0; i < length; i++) { |
| if (letters[i] <= String::kMaxOneByteCharCode) { |
| letters[new_length++] = letters[i]; |
| } |
| } |
| length = new_length; |
| } |
| |
| return length; |
| } |
| |
| |
| static inline bool EmitSimpleCharacter(Isolate* isolate, |
| RegExpCompiler* compiler, |
| uc16 c, |
| Label* on_failure, |
| int cp_offset, |
| bool check, |
| bool preloaded) { |
| RegExpMacroAssembler* assembler = compiler->macro_assembler(); |
| bool bound_checked = false; |
| if (!preloaded) { |
| assembler->LoadCurrentCharacter( |
| cp_offset, |
| on_failure, |
| check); |
| bound_checked = true; |
| } |
| assembler->CheckNotCharacter(c, on_failure); |
| return bound_checked; |
| } |
| |
| |
| // Only emits non-letters (things that don't have case). Only used for case |
| // independent matches. |
| static inline bool EmitAtomNonLetter(Isolate* isolate, |
| RegExpCompiler* compiler, |
| uc16 c, |
| Label* on_failure, |
| int cp_offset, |
| bool check, |
| bool preloaded) { |
| RegExpMacroAssembler* macro_assembler = compiler->macro_assembler(); |
| bool one_byte = compiler->one_byte(); |
| unibrow::uchar chars[unibrow::Ecma262UnCanonicalize::kMaxWidth]; |
| int length = GetCaseIndependentLetters(isolate, c, one_byte, chars); |
| if (length < 1) { |
|     // This can't match. Must be a one-byte subject and a non-one-byte |
| // character. We do not need to do anything since the one-byte pass |
| // already handled this. |
| return false; // Bounds not checked. |
| } |
| bool checked = false; |
| // We handle the length > 1 case in a later pass. |
| if (length == 1) { |
| if (one_byte && c > String::kMaxOneByteCharCodeU) { |
| // Can't match - see above. |
| return false; // Bounds not checked. |
| } |
| if (!preloaded) { |
| macro_assembler->LoadCurrentCharacter(cp_offset, on_failure, check); |
| checked = check; |
| } |
| macro_assembler->CheckNotCharacter(c, on_failure); |
| } |
| return checked; |
| } |
| |
| |
| static bool ShortCutEmitCharacterPair(RegExpMacroAssembler* macro_assembler, |
| bool one_byte, uc16 c1, uc16 c2, |
| Label* on_failure) { |
| uc16 char_mask; |
| if (one_byte) { |
| char_mask = String::kMaxOneByteCharCode; |
| } else { |
| char_mask = String::kMaxUtf16CodeUnit; |
| } |
| uc16 exor = c1 ^ c2; |
| // Check whether exor has only one bit set. |
| if (((exor - 1) & exor) == 0) { |
|     // c1 and c2 differ only by one bit, so mask that bit out and compare. |
| // Ecma262UnCanonicalize always gives the highest number last. |
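|     // E.g. for {'A' (0x41), 'a' (0x61)} exor is 0x20, so comparing the |
|     // current character with the 0x20 bit masked out against 0x41 accepts |
|     // exactly these two characters. |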
| DCHECK(c2 > c1); |
| uc16 mask = char_mask ^ exor; |
| macro_assembler->CheckNotCharacterAfterAnd(c1, mask, on_failure); |
| return true; |
| } |
| DCHECK(c2 > c1); |
| uc16 diff = c2 - c1; |
| if (((diff - 1) & diff) == 0 && c1 >= diff) { |
|     // The characters differ by 2^n but not by a single bit, so subtract the |
|     // difference from the current character and then apply the same |
|     // mask-and-compare trick. We avoid the theoretical case where negative |
|     // numbers are involved in order to simplify code generation. |
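|     // E.g. GREEK CAPITAL and SMALL OMEGA (0x3a9, 0x3c9) differ by 0x20 but |
|     // in two bits; subtracting 0x20 and masking out that bit maps both to |
|     // 0x389. |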
| uc16 mask = char_mask ^ diff; |
| macro_assembler->CheckNotCharacterAfterMinusAnd(c1 - diff, |
| diff, |
| mask, |
| on_failure); |
| return true; |
| } |
| return false; |
| } |
| |
| |
| typedef bool EmitCharacterFunction(Isolate* isolate, |
| RegExpCompiler* compiler, |
| uc16 c, |
| Label* on_failure, |
| int cp_offset, |
| bool check, |
| bool preloaded); |
| |
| // Only emits letters (things that have case). Only used for case independent |
| // matches. |
| static inline bool EmitAtomLetter(Isolate* isolate, |
| RegExpCompiler* compiler, |
| uc16 c, |
| Label* on_failure, |
| int cp_offset, |
| bool check, |
| bool preloaded) { |
| RegExpMacroAssembler* macro_assembler = compiler->macro_assembler(); |
| bool one_byte = compiler->one_byte(); |
| unibrow::uchar chars[unibrow::Ecma262UnCanonicalize::kMaxWidth]; |
| int length = GetCaseIndependentLetters(isolate, c, one_byte, chars); |
| if (length <= 1) return false; |
| // We may not need to check against the end of the input string |
| // if this character lies before a character that matched. |
| if (!preloaded) { |
| macro_assembler->LoadCurrentCharacter(cp_offset, on_failure, check); |
| } |
| Label ok; |
| DCHECK_EQ(4, unibrow::Ecma262UnCanonicalize::kMaxWidth); |
| switch (length) { |
| case 2: { |
|       if (!ShortCutEmitCharacterPair(macro_assembler, one_byte, chars[0], |
|                                      chars[1], on_failure)) { |
|         macro_assembler->CheckCharacter(chars[0], &ok); |
|         macro_assembler->CheckNotCharacter(chars[1], on_failure); |
|         macro_assembler->Bind(&ok); |
|       } |
| break; |
| } |
| case 4: |
| macro_assembler->CheckCharacter(chars[3], &ok); |
| // Fall through! |
| case 3: |
| macro_assembler->CheckCharacter(chars[0], &ok); |
| macro_assembler->CheckCharacter(chars[1], &ok); |
| macro_assembler->CheckNotCharacter(chars[2], on_failure); |
| macro_assembler->Bind(&ok); |
| break; |
| default: |
| UNREACHABLE(); |
| break; |
| } |
| return true; |
| } |
| |
| |
| static void EmitBoundaryTest(RegExpMacroAssembler* masm, |
| int border, |
| Label* fall_through, |
| Label* above_or_equal, |
| Label* below) { |
| if (below != fall_through) { |
| masm->CheckCharacterLT(border, below); |
| if (above_or_equal != fall_through) masm->GoTo(above_or_equal); |
| } else { |
| masm->CheckCharacterGT(border - 1, above_or_equal); |
| } |
| } |
| |
| |
| static void EmitDoubleBoundaryTest(RegExpMacroAssembler* masm, |
| int first, |
| int last, |
| Label* fall_through, |
| Label* in_range, |
| Label* out_of_range) { |
| if (in_range == fall_through) { |
| if (first == last) { |
| masm->CheckNotCharacter(first, out_of_range); |
| } else { |
| masm->CheckCharacterNotInRange(first, last, out_of_range); |
| } |
| } else { |
| if (first == last) { |
| masm->CheckCharacter(first, in_range); |
| } else { |
| masm->CheckCharacterInRange(first, last, in_range); |
| } |
| if (out_of_range != fall_through) masm->GoTo(out_of_range); |
| } |
| } |
| |
| |
| // even_label is for ranges[i] to ranges[i + 1] where i - start_index is even. |
| // odd_label is for ranges[i] to ranges[i + 1] where i - start_index is odd. |
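| // The table covers one kTableSize-aligned block of characters; each byte in |
| // it records whether that character belongs to an even or an odd interval. |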
| static void EmitUseLookupTable( |
| RegExpMacroAssembler* masm, |
| ZoneList<int>* ranges, |
| int start_index, |
| int end_index, |
| int min_char, |
| Label* fall_through, |
| Label* even_label, |
| Label* odd_label) { |
| static const int kSize = RegExpMacroAssembler::kTableSize; |
| static const int kMask = RegExpMacroAssembler::kTableMask; |
| |
| int base = (min_char & ~kMask); |
| USE(base); |
| |
| // Assert that everything is on one kTableSize page. |
| for (int i = start_index; i <= end_index; i++) { |
| DCHECK_EQ(ranges->at(i) & ~kMask, base); |
| } |
| DCHECK(start_index == 0 || (ranges->at(start_index - 1) & ~kMask) <= base); |
| |
| char templ[kSize]; |
| Label* on_bit_set; |
| Label* on_bit_clear; |
| int bit; |
| if (even_label == fall_through) { |
| on_bit_set = odd_label; |
| on_bit_clear = even_label; |
| bit = 1; |
| } else { |
| on_bit_set = even_label; |
| on_bit_clear = odd_label; |
| bit = 0; |
| } |
| for (int i = 0; i < (ranges->at(start_index) & kMask) && i < kSize; i++) { |
| templ[i] = bit; |
| } |
| int j = 0; |
| bit ^= 1; |
| for (int i = start_index; i < end_index; i++) { |
| for (j = (ranges->at(i) & kMask); j < (ranges->at(i + 1) & kMask); j++) { |
| templ[j] = bit; |
| } |
| bit ^= 1; |
| } |
| for (int i = j; i < kSize; i++) { |
| templ[i] = bit; |
| } |
| Factory* factory = masm->isolate()->factory(); |
| // TODO(erikcorry): Cache these. |
| Handle<ByteArray> ba = factory->NewByteArray(kSize, TENURED); |
| for (int i = 0; i < kSize; i++) { |
| ba->set(i, templ[i]); |
| } |
| masm->CheckBitInTable(ba, on_bit_set); |
| if (on_bit_clear != fall_through) masm->GoTo(on_bit_clear); |
| } |
| |
| |
| static void CutOutRange(RegExpMacroAssembler* masm, |
| ZoneList<int>* ranges, |
| int start_index, |
| int end_index, |
| int cut_index, |
| Label* even_label, |
| Label* odd_label) { |
| bool odd = (((cut_index - start_index) & 1) == 1); |
| Label* in_range_label = odd ? odd_label : even_label; |
| Label dummy; |
| EmitDoubleBoundaryTest(masm, |
| ranges->at(cut_index), |
| ranges->at(cut_index + 1) - 1, |
| &dummy, |
| in_range_label, |
| &dummy); |
| DCHECK(!dummy.is_linked()); |
| // Cut out the single range by rewriting the array. This creates a new |
| // range that is a merger of the two ranges on either side of the one we |
| // are cutting out. The oddity of the labels is preserved. |
| for (int j = cut_index; j > start_index; j--) { |
| ranges->at(j) = ranges->at(j - 1); |
| } |
| for (int j = cut_index + 1; j < end_index; j++) { |
| ranges->at(j) = ranges->at(j + 1); |
| } |
| } |
| |
| |
| // Unicode case. Split the search space into kSize spaces that are handled |
| // with recursion. |
| static void SplitSearchSpace(ZoneList<int>* ranges, |
| int start_index, |
| int end_index, |
| int* new_start_index, |
| int* new_end_index, |
| int* border) { |
| static const int kSize = RegExpMacroAssembler::kTableSize; |
| static const int kMask = RegExpMacroAssembler::kTableMask; |
| |
| int first = ranges->at(start_index); |
| int last = ranges->at(end_index) - 1; |
| |
| *new_start_index = start_index; |
| *border = (ranges->at(start_index) & ~kMask) + kSize; |
| while (*new_start_index < end_index) { |
| if (ranges->at(*new_start_index) > *border) break; |
| (*new_start_index)++; |
| } |
| // new_start_index is the index of the first edge that is beyond the |
| // current kSize space. |
| |
| // For very large search spaces we do a binary chop search of the non-Latin1 |
| // space instead of just going to the end of the current kSize space. The |
| // heuristics are complicated a little by the fact that any 128-character |
| // encoding space can be quickly tested with a table lookup, so we don't |
| // wish to do binary chop search at a smaller granularity than that. A |
| // 128-character space can take up a lot of space in the ranges array if, |
|   // for example, we only want to match every second character (e.g. the lower |
| // case characters on some Unicode pages). |
| int binary_chop_index = (end_index + start_index) / 2; |
| // The first test ensures that we get to the code that handles the Latin1 |
| // range with a single not-taken branch, speeding up this important |
| // character range (even non-Latin1 charset-based text has spaces and |
| // punctuation). |
| if (*border - 1 > String::kMaxOneByteCharCode && // Latin1 case. |
| end_index - start_index > (*new_start_index - start_index) * 2 && |
| last - first > kSize * 2 && binary_chop_index > *new_start_index && |
| ranges->at(binary_chop_index) >= first + 2 * kSize) { |
|     int scan_forward_for_section_border = binary_chop_index; |
| int new_border = (ranges->at(binary_chop_index) | kMask) + 1; |
| |
| while (scan_forward_for_section_border < end_index) { |
| if (ranges->at(scan_forward_for_section_border) > new_border) { |
| *new_start_index = scan_forward_for_section_border; |
| *border = new_border; |
| break; |
| } |
| scan_forward_for_section_border++; |
| } |
| } |
| |
| DCHECK(*new_start_index > start_index); |
| *new_end_index = *new_start_index - 1; |
| if (ranges->at(*new_end_index) == *border) { |
| (*new_end_index)--; |
| } |
| if (*border >= ranges->at(end_index)) { |
| *border = ranges->at(end_index); |
| *new_start_index = end_index; // Won't be used. |
| *new_end_index = end_index - 1; |
| } |
| } |
| |
| // Gets a series of segment boundaries representing a character class. If the |
| // character is in the range between an even and an odd boundary (counting from |
| // start_index) then go to even_label, otherwise go to odd_label. We already |
| // know that the character is in the range of min_char to max_char inclusive. |
| // Either label can be nullptr indicating backtracking. Either label can also |
| // be equal to the fall_through label. |
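| // E.g. for the digit class the boundaries are {0x30, 0x3a}: characters in |
| // [0x30, 0x39] reach even_label and everything else reaches odd_label. |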
| static void GenerateBranches(RegExpMacroAssembler* masm, ZoneList<int>* ranges, |
| int start_index, int end_index, uc32 min_char, |
| uc32 max_char, Label* fall_through, |
| Label* even_label, Label* odd_label) { |
| DCHECK_LE(min_char, String::kMaxUtf16CodeUnit); |
| DCHECK_LE(max_char, String::kMaxUtf16CodeUnit); |
| |
| int first = ranges->at(start_index); |
| int last = ranges->at(end_index) - 1; |
| |
| DCHECK_LT(min_char, first); |
| |
| // Just need to test if the character is before or on-or-after |
| // a particular character. |
| if (start_index == end_index) { |
| EmitBoundaryTest(masm, first, fall_through, even_label, odd_label); |
| return; |
| } |
| |
| // Another almost trivial case: There is one interval in the middle that is |
| // different from the end intervals. |
| if (start_index + 1 == end_index) { |
| EmitDoubleBoundaryTest( |
| masm, first, last, fall_through, even_label, odd_label); |
| return; |
| } |
| |
| // It's not worth using table lookup if there are very few intervals in the |
| // character class. |
| if (end_index - start_index <= 6) { |
| // It is faster to test for individual characters, so we look for those |
| // first, then try arbitrary ranges in the second round. |
| static int kNoCutIndex = -1; |
| int cut = kNoCutIndex; |
| for (int i = start_index; i < end_index; i++) { |
| if (ranges->at(i) == ranges->at(i + 1) - 1) { |
| cut = i; |
| break; |
| } |
| } |
| if (cut == kNoCutIndex) cut = start_index; |
| CutOutRange( |
| masm, ranges, start_index, end_index, cut, even_label, odd_label); |
| DCHECK_GE(end_index - start_index, 2); |
| GenerateBranches(masm, |
| ranges, |
| start_index + 1, |
| end_index - 1, |
| min_char, |
| max_char, |
| fall_through, |
| even_label, |
| odd_label); |
| return; |
| } |
| |
| // If there are a lot of intervals in the regexp, then we will use tables to |
| // determine whether the character is inside or outside the character class. |
| static const int kBits = RegExpMacroAssembler::kTableSizeBits; |
| |
| if ((max_char >> kBits) == (min_char >> kBits)) { |
| EmitUseLookupTable(masm, |
| ranges, |
| start_index, |
| end_index, |
| min_char, |
| fall_through, |
| even_label, |
| odd_label); |
| return; |
| } |
| |
| if ((min_char >> kBits) != (first >> kBits)) { |
| masm->CheckCharacterLT(first, odd_label); |
| GenerateBranches(masm, |
| ranges, |
| start_index + 1, |
| end_index, |
| first, |
| max_char, |
| fall_through, |
| odd_label, |
| even_label); |
| return; |
| } |
| |
| int new_start_index = 0; |
| int new_end_index = 0; |
| int border = 0; |
| |
| SplitSearchSpace(ranges, |
| start_index, |
| end_index, |
| &new_start_index, |
| &new_end_index, |
| &border); |
| |
| Label handle_rest; |
| Label* above = &handle_rest; |
| if (border == last + 1) { |
| // We didn't find any section that started after the limit, so everything |
| // above the border is one of the terminal labels. |
| above = (end_index & 1) != (start_index & 1) ? odd_label : even_label; |
| DCHECK(new_end_index == end_index - 1); |
| } |
| |
| DCHECK_LE(start_index, new_end_index); |
| DCHECK_LE(new_start_index, end_index); |
| DCHECK_LT(start_index, new_start_index); |
| DCHECK_LT(new_end_index, end_index); |
| DCHECK(new_end_index + 1 == new_start_index || |
| (new_end_index + 2 == new_start_index && |
| border == ranges->at(new_end_index + 1))); |
| DCHECK_LT(min_char, border - 1); |
| DCHECK_LT(border, max_char); |
| DCHECK_LT(ranges->at(new_end_index), border); |
| DCHECK(border < ranges->at(new_start_index) || |
| (border == ranges->at(new_start_index) && |
| new_start_index == end_index && |
| new_end_index == end_index - 1 && |
| border == last + 1)); |
| DCHECK(new_start_index == 0 || border >= ranges->at(new_start_index - 1)); |
| |
| masm->CheckCharacterGT(border - 1, above); |
| Label dummy; |
| GenerateBranches(masm, |
| ranges, |
| start_index, |
| new_end_index, |
| min_char, |
| border - 1, |
| &dummy, |
| even_label, |
| odd_label); |
| if (handle_rest.is_linked()) { |
| masm->Bind(&handle_rest); |
| bool flip = (new_start_index & 1) != (start_index & 1); |
| GenerateBranches(masm, |
| ranges, |
| new_start_index, |
| end_index, |
| border, |
| max_char, |
| &dummy, |
| flip ? odd_label : even_label, |
| flip ? even_label : odd_label); |
| } |
| } |
| |
| |
| static void EmitCharClass(RegExpMacroAssembler* macro_assembler, |
| RegExpCharacterClass* cc, bool one_byte, |
| Label* on_failure, int cp_offset, bool check_offset, |
| bool preloaded, Zone* zone) { |
| ZoneList<CharacterRange>* ranges = cc->ranges(zone); |
| CharacterRange::Canonicalize(ranges); |
| |
| int max_char; |
| if (one_byte) { |
| max_char = String::kMaxOneByteCharCode; |
| } else { |
| max_char = String::kMaxUtf16CodeUnit; |
| } |
| |
| int range_count = ranges->length(); |
| |
| int last_valid_range = range_count - 1; |
| while (last_valid_range >= 0) { |
| CharacterRange& range = ranges->at(last_valid_range); |
| if (range.from() <= max_char) { |
| break; |
| } |
| last_valid_range--; |
| } |
| |
| if (last_valid_range < 0) { |
| if (!cc->is_negated()) { |
| macro_assembler->GoTo(on_failure); |
| } |
| if (check_offset) { |
| macro_assembler->CheckPosition(cp_offset, on_failure); |
| } |
| return; |
| } |
| |
| if (last_valid_range == 0 && |
| ranges->at(0).IsEverything(max_char)) { |
| if (cc->is_negated()) { |
| macro_assembler->GoTo(on_failure); |
| } else { |
| // This is a common case hit by non-anchored expressions. |
| if (check_offset) { |
| macro_assembler->CheckPosition(cp_offset, on_failure); |
| } |
| } |
| return; |
| } |
| |
| if (!preloaded) { |
| macro_assembler->LoadCurrentCharacter(cp_offset, on_failure, check_offset); |
| } |
| |
| if (cc->is_standard(zone) && |
| macro_assembler->CheckSpecialCharacterClass(cc->standard_type(), |
| on_failure)) { |
| return; |
| } |
| |
| |
| // A new list with ascending entries. Each entry is a code unit |
| // where there is a boundary between code units that are part of |
| // the class and code units that are not. Normally we insert an |
| // entry at zero which goes to the failure label, but if there |
| // was already one there we fall through for success on that entry. |
| // Subsequent entries have alternating meaning (success/failure). |
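|   // E.g. for /[a-cx-z]/ the boundaries are {0x61, 0x64, 0x78, 0x7b}; |
|   // characters below 0x61 go to the failure label, and for a negated class |
|   // the success and failure roles of the entries are simply swapped. |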
| ZoneList<int>* range_boundaries = |
| new(zone) ZoneList<int>(last_valid_range, zone); |
| |
| bool zeroth_entry_is_failure = !cc->is_negated(); |
| |
| for (int i = 0; i <= last_valid_range; i++) { |
| CharacterRange& range = ranges->at(i); |
| if (range.from() == 0) { |
| DCHECK_EQ(i, 0); |
| zeroth_entry_is_failure = !zeroth_entry_is_failure; |
| } else { |
| range_boundaries->Add(range.from(), zone); |
| } |
| range_boundaries->Add(range.to() + 1, zone); |
| } |
| int end_index = range_boundaries->length() - 1; |
| if (range_boundaries->at(end_index) > max_char) { |
| end_index--; |
| } |
| |
| Label fall_through; |
| GenerateBranches(macro_assembler, |
| range_boundaries, |
| 0, // start_index. |
| end_index, |
| 0, // min_char. |
| max_char, |
| &fall_through, |
| zeroth_entry_is_failure ? &fall_through : on_failure, |
| zeroth_entry_is_failure ? on_failure : &fall_through); |
| macro_assembler->Bind(&fall_through); |
| } |
| |
| |
| RegExpNode::~RegExpNode() { |
| } |
| |
| |
| RegExpNode::LimitResult RegExpNode::LimitVersions(RegExpCompiler* compiler, |
| Trace* trace) { |
| // If we are generating a greedy loop then don't stop and don't reuse code. |
| if (trace->stop_node() != nullptr) { |
| return CONTINUE; |
| } |
| |
| RegExpMacroAssembler* macro_assembler = compiler->macro_assembler(); |
| if (trace->is_trivial()) { |
| if (label_.is_bound() || on_work_list() || !KeepRecursing(compiler)) { |
| // If a generic version is already scheduled to be generated or we have |
| // recursed too deeply then just generate a jump to that code. |
| macro_assembler->GoTo(&label_); |
| // This will queue it up for generation of a generic version if it hasn't |
| // already been queued. |
| compiler->AddWork(this); |
| return DONE; |
| } |
| // Generate generic version of the node and bind the label for later use. |
| macro_assembler->Bind(&label_); |
| return CONTINUE; |
| } |
| |
| // We are being asked to make a non-generic version. Keep track of how many |
| // non-generic versions we generate so as not to overdo it. |
| trace_count_++; |
| if (KeepRecursing(compiler) && compiler->optimize() && |
| trace_count_ < kMaxCopiesCodeGenerated) { |
| return CONTINUE; |
| } |
| |
| // If we get here code has been generated for this node too many times or |
| // recursion is too deep. Time to switch to a generic version. The code for |
| // generic versions above can handle deep recursion properly. |
| bool was_limiting = compiler->limiting_recursion(); |
| compiler->set_limiting_recursion(true); |
| trace->Flush(compiler, this); |
| compiler->set_limiting_recursion(was_limiting); |
| return DONE; |
| } |
| |
| |
| bool RegExpNode::KeepRecursing(RegExpCompiler* compiler) { |
| return !compiler->limiting_recursion() && |
| compiler->recursion_depth() <= RegExpCompiler::kMaxRecursion; |
| } |
| |
| |
| int ActionNode::EatsAtLeast(int still_to_find, |
| int budget, |
| bool not_at_start) { |
| if (budget <= 0) return 0; |
| if (action_type_ == POSITIVE_SUBMATCH_SUCCESS) return 0; // Rewinds input! |
| return on_success()->EatsAtLeast(still_to_find, |
| budget - 1, |
| not_at_start); |
| } |
| |
| |
| void ActionNode::FillInBMInfo(Isolate* isolate, int offset, int budget, |
| BoyerMooreLookahead* bm, bool not_at_start) { |
| if (action_type_ != POSITIVE_SUBMATCH_SUCCESS) { |
| on_success()->FillInBMInfo(isolate, offset, budget - 1, bm, not_at_start); |
| } |
| SaveBMInfo(bm, not_at_start, offset); |
| } |
| |
| |
| int AssertionNode::EatsAtLeast(int still_to_find, |
| int budget, |
| bool not_at_start) { |
| if (budget <= 0) return 0; |
| // If we know we are not at the start and we are asked "how many characters |
| // will you match if you succeed?" then we can answer anything since false |
|   // implies false. So let's just return the max answer (still_to_find) since |
| // that won't prevent us from preloading a lot of characters for the other |
| // branches in the node graph. |
| if (assertion_type() == AT_START && not_at_start) return still_to_find; |
| return on_success()->EatsAtLeast(still_to_find, |
| budget - 1, |
| not_at_start); |
| } |
| |
| |
| void AssertionNode::FillInBMInfo(Isolate* isolate, int offset, int budget, |
| BoyerMooreLookahead* bm, bool not_at_start) { |
| // Match the behaviour of EatsAtLeast on this node. |
| if (assertion_type() == AT_START && not_at_start) return; |
| on_success()->FillInBMInfo(isolate, offset, budget - 1, bm, not_at_start); |
| SaveBMInfo(bm, not_at_start, offset); |
| } |
| |
| |
| int BackReferenceNode::EatsAtLeast(int still_to_find, |
| int budget, |
| bool not_at_start) { |
| if (read_backward()) return 0; |
| if (budget <= 0) return 0; |
| return on_success()->EatsAtLeast(still_to_find, |
| budget - 1, |
| not_at_start); |
| } |
| |
| |
| int TextNode::EatsAtLeast(int still_to_find, |
| int budget, |
| bool not_at_start) { |
| if (read_backward()) return 0; |
| int answer = Length(); |
| if (answer >= still_to_find) return answer; |
| if (budget <= 0) return answer; |
| // We are not at start after this node so we set the last argument to 'true'. |
| return answer + on_success()->EatsAtLeast(still_to_find - answer, |
| budget - 1, |
| true); |
| } |
| |
| |
| int NegativeLookaroundChoiceNode::EatsAtLeast(int still_to_find, int budget, |
| bool not_at_start) { |
| if (budget <= 0) return 0; |
| // Alternative 0 is the negative lookahead, alternative 1 is what comes |
| // afterwards. |
| RegExpNode* node = alternatives_->at(1).node(); |
| return node->EatsAtLeast(still_to_find, budget - 1, not_at_start); |
| } |
| |
| |
| void NegativeLookaroundChoiceNode::GetQuickCheckDetails( |
| QuickCheckDetails* details, RegExpCompiler* compiler, int filled_in, |
| bool not_at_start) { |
| // Alternative 0 is the negative lookahead, alternative 1 is what comes |
| // afterwards. |
| RegExpNode* node = alternatives_->at(1).node(); |
| return node->GetQuickCheckDetails(details, compiler, filled_in, not_at_start); |
| } |
| |
| |
| int ChoiceNode::EatsAtLeastHelper(int still_to_find, |
| int budget, |
| RegExpNode* ignore_this_node, |
| bool not_at_start) { |
| if (budget <= 0) return 0; |
| int min = 100; |
| int choice_count = alternatives_->length(); |
| budget = (budget - 1) / choice_count; |
| for (int i = 0; i < choice_count; i++) { |
| RegExpNode* node = alternatives_->at(i).node(); |
| if (node == ignore_this_node) continue; |
| int node_eats_at_least = |
| node->EatsAtLeast(still_to_find, budget, not_at_start); |
| if (node_eats_at_least < min) min = node_eats_at_least; |
| if (min == 0) return 0; |
| } |
| return min; |
| } |
| |
| |
| int LoopChoiceNode::EatsAtLeast(int still_to_find, |
| int budget, |
| bool not_at_start) { |
| return EatsAtLeastHelper(still_to_find, |
| budget - 1, |
| loop_node_, |
| not_at_start); |
| } |
| |
| |
| int ChoiceNode::EatsAtLeast(int still_to_find, |
| int budget, |
| bool not_at_start) { |
| return EatsAtLeastHelper(still_to_find, budget, nullptr, not_at_start); |
| } |
| |
| |
| // Takes the left-most 1-bit and smears it out, setting all bits to its right. |
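| // E.g. SmearBitsRight(0x48) == 0x7f and SmearBitsRight(0x8000) == 0xffff. |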
| static inline uint32_t SmearBitsRight(uint32_t v) { |
| v |= v >> 1; |
| v |= v >> 2; |
| v |= v >> 4; |
| v |= v >> 8; |
| v |= v >> 16; |
| return v; |
| } |
| |
| |
| bool QuickCheckDetails::Rationalize(bool asc) { |
| bool found_useful_op = false; |
| uint32_t char_mask; |
| if (asc) { |
| char_mask = String::kMaxOneByteCharCode; |
| } else { |
| char_mask = String::kMaxUtf16CodeUnit; |
| } |
| mask_ = 0; |
| value_ = 0; |
| int char_shift = 0; |
| for (int i = 0; i < characters_; i++) { |
| Position* pos = &positions_[i]; |
| if ((pos->mask & String::kMaxOneByteCharCode) != 0) { |
| found_useful_op = true; |
| } |
| mask_ |= (pos->mask & char_mask) << char_shift; |
| value_ |= (pos->value & char_mask) << char_shift; |
| char_shift += asc ? 8 : 16; |
| } |
| return found_useful_op; |
| } |
| |
| |
| bool RegExpNode::EmitQuickCheck(RegExpCompiler* compiler, |
| Trace* bounds_check_trace, |
| Trace* trace, |
| bool preload_has_checked_bounds, |
| Label* on_possible_success, |
| QuickCheckDetails* details, |
| bool fall_through_on_failure) { |
| if (details->characters() == 0) return false; |
| GetQuickCheckDetails( |
| details, compiler, 0, trace->at_start() == Trace::FALSE_VALUE); |
| if (details->cannot_match()) return false; |
| if (!details->Rationalize(compiler->one_byte())) return false; |
| DCHECK(details->characters() == 1 || |
| compiler->macro_assembler()->CanReadUnaligned()); |
| uint32_t mask = details->mask(); |
| uint32_t value = details->value(); |
| |
| RegExpMacroAssembler* assembler = compiler->macro_assembler(); |
| |
| if (trace->characters_preloaded() != details->characters()) { |
| DCHECK(trace->cp_offset() == bounds_check_trace->cp_offset()); |
| // We are attempting to preload the minimum number of characters |
| // any choice would eat, so if the bounds check fails, then none of the |
| // choices can succeed, so we can just immediately backtrack, rather |
| // than go to the next choice. |
| assembler->LoadCurrentCharacter(trace->cp_offset(), |
| bounds_check_trace->backtrack(), |
| !preload_has_checked_bounds, |
| details->characters()); |
| } |
| |
| |
| bool need_mask = true; |
| |
| if (details->characters() == 1) { |
| // If number of characters preloaded is 1 then we used a byte or 16 bit |
| // load so the value is already masked down. |
| uint32_t char_mask; |
| if (compiler->one_byte()) { |
| char_mask = String::kMaxOneByteCharCode; |
| } else { |
| char_mask = String::kMaxUtf16CodeUnit; |
| } |
| if ((mask & char_mask) == char_mask) need_mask = false; |
| mask &= char_mask; |
| } else { |
| // For 2-character preloads in one-byte mode or 1-character preloads in |
| // two-byte mode we also use a 16 bit load with zero extend. |
| static const uint32_t kTwoByteMask = 0xFFFF; |
| static const uint32_t kFourByteMask = 0xFFFFFFFF; |
| if (details->characters() == 2 && compiler->one_byte()) { |
| if ((mask & kTwoByteMask) == kTwoByteMask) need_mask = false; |
| } else if (details->characters() == 1 && !compiler->one_byte()) { |
| if ((mask & kTwoByteMask) == kTwoByteMask) need_mask = false; |
| } else { |
| if (mask == kFourByteMask) need_mask = false; |
| } |
| } |
| |
| if (fall_through_on_failure) { |
| if (need_mask) { |
| assembler->CheckCharacterAfterAnd(value, mask, on_possible_success); |
| } else { |
| assembler->CheckCharacter(value, on_possible_success); |
| } |
| } else { |
| if (need_mask) { |
| assembler->CheckNotCharacterAfterAnd(value, mask, trace->backtrack()); |
| } else { |
| assembler->CheckNotCharacter(value, trace->backtrack()); |
| } |
| } |
| return true; |
| } |
| |
| |
| // Here is the meat of GetQuickCheckDetails (see also the comment on the |
| // super-class in the .h file). |
| // |
| // We iterate along the text object, building up for each character a |
| // mask and value that can be used to test for a quick failure to match. |
| // The masks and values for the positions will be combined into a single |
| // machine word for the current character width in order to be used in |
| // generating a quick check. |
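| // E.g. for the pattern /ab/ on a one-byte subject the two positions combine |
| // into mask 0xffff and value 0x6261 ('a' | 'b' << 8), so a single 16-bit |
| // load and compare can reject a candidate position. |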
| void TextNode::GetQuickCheckDetails(QuickCheckDetails* details, |
| RegExpCompiler* compiler, |
| int characters_filled_in, |
| bool not_at_start) { |
| // Do not collect any quick check details if the text node reads backward, |
|   // since it reads in the opposite direction from the one quick checks use. |
| if (read_backward()) return; |
| Isolate* isolate = compiler->macro_assembler()->isolate(); |
| DCHECK(characters_filled_in < details->characters()); |
| int characters = details->characters(); |
| int char_mask; |
| if (compiler->one_byte()) { |
| char_mask = String::kMaxOneByteCharCode; |
| } else { |
| char_mask = String::kMaxUtf16CodeUnit; |
| } |
| for (int k = 0; k < elements()->length(); k++) { |
| TextElement elm = elements()->at(k); |
| if (elm.text_type() == TextElement::ATOM) { |
| Vector<const uc16> quarks = elm.atom()->data(); |
| for (int i = 0; i < characters && i < quarks.length(); i++) { |
| QuickCheckDetails::Position* pos = |
| details->positions(characters_filled_in); |
| uc16 c = quarks[i]; |
| if (elm.atom()->ignore_case()) { |
| unibrow::uchar chars[unibrow::Ecma262UnCanonicalize::kMaxWidth]; |
| int length = GetCaseIndependentLetters(isolate, c, |
| compiler->one_byte(), chars); |
| if (length == 0) { |
| // This can happen because all case variants are non-Latin1, but we |
| // know the input is Latin1. |
| details->set_cannot_match(); |
| pos->determines_perfectly = false; |
| return; |
| } |
| if (length == 1) { |
| // This letter has no case equivalents, so it's nice and simple |
| // and the mask-compare will determine definitely whether we have |
| // a match at this character position. |
| pos->mask = char_mask; |
| pos->value = c; |
| pos->determines_perfectly = true; |
| } else { |
| uint32_t common_bits = char_mask; |
| uint32_t bits = chars[0]; |
| for (int j = 1; j < length; j++) { |
| uint32_t differing_bits = ((chars[j] & common_bits) ^ bits); |
| common_bits ^= differing_bits; |
| bits &= common_bits; |
| } |
| // If length is 2 and common bits has only one zero in it then |
| // our mask and compare instruction will determine definitely |
| // whether we have a match at this character position. Otherwise |
| // it can only be an approximate check. |
| uint32_t one_zero = (common_bits | ~char_mask); |
| if (length == 2 && ((~one_zero) & ((~one_zero) - 1)) == 0) { |
| pos->determines_perfectly = true; |
| } |
| pos->mask = common_bits; |
| pos->value = bits; |
| } |
| } else { |
| // Don't ignore case. Nice simple case where the mask-compare will |
| // determine definitely whether we have a match at this character |
| // position. |
| if (c > char_mask) { |
| details->set_cannot_match(); |
| pos->determines_perfectly = false; |
| return; |
| } |
| pos->mask = char_mask; |
| pos->value = c; |
| pos->determines_perfectly = true; |
| } |
| characters_filled_in++; |
| DCHECK(characters_filled_in <= details->characters()); |
| if (characters_filled_in == details->characters()) { |
| return; |
| } |
| } |
| } else { |
| QuickCheckDetails::Position* pos = |
| details->positions(characters_filled_in); |
| RegExpCharacterClass* tree = elm.char_class(); |
| ZoneList<CharacterRange>* ranges = tree->ranges(zone()); |
| DCHECK(!ranges->is_empty()); |
| if (tree->is_negated()) { |
| // A quick check uses multi-character mask and compare. There is no |
| // useful way to incorporate a negative char class into this scheme |
| // so we just conservatively create a mask and value that will always |
| // succeed. |
| pos->mask = 0; |
| pos->value = 0; |
| } else { |
| int first_range = 0; |
| while (ranges->at(first_range).from() > char_mask) { |
| first_range++; |
| if (first_range == ranges->length()) { |
| details->set_cannot_match(); |
| pos->determines_perfectly = false; |
| return; |
| } |
| } |
| CharacterRange range = ranges->at(first_range); |
| uc16 from = range.from(); |
| uc16 to = range.to(); |
| if (to > char_mask) { |
| to = char_mask; |
| } |
| uint32_t differing_bits = (from ^ to); |
| // A mask and compare is only perfect if the differing bits form a |
| // number like 00011111 with one single block of trailing 1s. |
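|         // E.g. for the range 0x60-0x7f differing_bits is 0x1f: keeping only |
|         // the bits above the low five and comparing against 0x60 matches |
|         // exactly this range. |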
| if ((differing_bits & (differing_bits + 1)) == 0 && |
| from + differing_bits == to) { |
| pos->determines_perfectly = true; |
| } |
| uint32_t common_bits = ~SmearBitsRight(differing_bits); |
| uint32_t bits = (from & common_bits); |
| for (int i = first_range + 1; i < ranges->length(); i++) { |
| CharacterRange range = ranges->at(i); |
| uc16 from = range.from(); |
| uc16 to = range.to(); |
| if (from > char_mask) continue; |
| if (to > char_mask) to = char_mask; |
| // Here we are combining more ranges into the mask and compare |
| // value. With each new range the mask becomes more sparse and |
| // so the chances of a false positive rise. A character class |
| // with multiple ranges is assumed never to be equivalent to a |
| // mask and compare operation. |
| pos->determines_perfectly = false; |
| uint32_t new_common_bits = (from ^ to); |
| new_common_bits = ~SmearBitsRight(new_common_bits); |
| common_bits &= new_common_bits; |
| bits &= new_common_bits; |
| uint32_t differing_bits = (from & common_bits) ^ bits; |
| common_bits ^= differing_bits; |
| bits &= common_bits; |
| } |
| pos->mask = common_bits; |
| pos->value = bits; |
| } |
| characters_filled_in++; |
| DCHECK(characters_filled_in <= details->characters()); |
| if (characters_filled_in == details->characters()) { |
| return; |
| } |
| } |
| } |
| DCHECK(characters_filled_in != details->characters()); |
| if (!details->cannot_match()) { |
|     on_success()->GetQuickCheckDetails(details, |
|                                        compiler, |
|                                        characters_filled_in, |
|                                        true); |
| } |
| } |
| |
| |
| void QuickCheckDetails::Clear() { |
| for (int i = 0; i < characters_; i++) { |
| positions_[i].mask = 0; |
| positions_[i].value = 0; |
| positions_[i].determines_perfectly = false; |
| } |
| characters_ = 0; |
| } |
| |
| |
| void QuickCheckDetails::Advance(int by, bool one_byte) { |
| if (by >= characters_ || by < 0) { |
| DCHECK_IMPLIES(by < 0, characters_ == 0); |
| Clear(); |
| return; |
| } |
| DCHECK_LE(characters_ - by, 4); |
| DCHECK_LE(characters_, 4); |
| for (int i = 0; i < characters_ - by; i++) { |
| positions_[i] = positions_[by + i]; |
| } |
| for (int i = characters_ - by; i < characters_; i++) { |
| positions_[i].mask = 0; |
| positions_[i].value = 0; |
| positions_[i].determines_perfectly = false; |
| } |
| characters_ -= by; |
|   // We could also update mask_ and value_ here, but Advance() is only called |
|   // after they have already been used in a check, and they will not be used |
|   // again, so updating them would gain nothing. |
| } |
| |
| |
| void QuickCheckDetails::Merge(QuickCheckDetails* other, int from_index) { |
| DCHECK(characters_ == other->characters_); |
| if (other->cannot_match_) { |
| return; |
| } |
| if (cannot_match_) { |
| *this = *other; |
| return; |
| } |
| for (int i = from_index; i < characters_; i++) { |
| QuickCheckDetails::Position* pos = positions(i); |
| QuickCheckDetails::Position* other_pos = other->positions(i); |
| if (pos->mask != other_pos->mask || |
| pos->value != other_pos->value || |
| !other_pos->determines_perfectly) { |
| // Our mask-compare operation will be approximate unless we have the |
| // exact same operation on both sides of the alternation. |
| pos->determines_perfectly = false; |
| } |
| pos->mask &= other_pos->mask; |
| pos->value &= pos->mask; |
| other_pos->value &= pos->mask; |
| uc16 differing_bits = (pos->value ^ other_pos->value); |
| pos->mask &= ~differing_bits; |
| pos->value &= pos->mask; |
| } |
| } |
| |
| |
| class VisitMarker { |
| public: |
| explicit VisitMarker(NodeInfo* info) : info_(info) { |
| DCHECK(!info->visited); |
| info->visited = true; |
| } |
| ~VisitMarker() { |
| info_->visited = false; |
| } |
| private: |
| NodeInfo* info_; |
| }; |
| |
| RegExpNode* SeqRegExpNode::FilterOneByte(int depth) { |
| if (info()->replacement_calculated) return replacement(); |
| if (depth < 0) return this; |
| DCHECK(!info()->visited); |
| VisitMarker marker(info()); |
| return FilterSuccessor(depth - 1); |
| } |
| |
| RegExpNode* SeqRegExpNode::FilterSuccessor(int depth) { |
| RegExpNode* next = on_success_->FilterOneByte(depth - 1); |
| if (next == nullptr) return set_replacement(nullptr); |
| on_success_ = next; |
| return set_replacement(this); |
| } |
| |
| // We need to check for the following characters: 0x39C 0x3BC 0x178. They are |
| // the non-Latin1 characters whose case equivalents (the micro sign U+00B5 and |
| // y with diaeresis U+00FF) lie inside the Latin1 range. |
| static inline bool RangeContainsLatin1Equivalents(CharacterRange range) { |
| // TODO(dcarney): this could be a lot more efficient. |
| return range.Contains(0x039C) || range.Contains(0x03BC) || |
| range.Contains(0x0178); |
| } |
| |
| |
| static bool RangesContainLatin1Equivalents(ZoneList<CharacterRange>* ranges) { |
| for (int i = 0; i < ranges->length(); i++) { |
| // TODO(dcarney): this could be a lot more efficient. |
| if (RangeContainsLatin1Equivalents(ranges->at(i))) return true; |
| } |
| return false; |
| } |
| |
| RegExpNode* TextNode::FilterOneByte(int depth) { |
| if (info()->replacement_calculated) return replacement(); |
| if (depth < 0) return this; |
| DCHECK(!info()->visited); |
| VisitMarker marker(info()); |
| int element_count = elements()->length(); |
| for (int i = 0; i < element_count; i++) { |
| TextElement elm = elements()->at(i); |
| if (elm.text_type() == TextElement::ATOM) { |
| Vector<const uc16> quarks = elm.atom()->data(); |
| for (int j = 0; j < quarks.length(); j++) { |
| uint16_t c = quarks[j]; |
| if (c <= String::kMaxOneByteCharCode) continue; |
| if (!IgnoreCase(elm.atom()->flags())) return set_replacement(nullptr); |
| // Here, we need to check for characters whose upper and lower cases |
| // are outside the Latin-1 range. |
| uint16_t converted = unibrow::Latin1::ConvertNonLatin1ToLatin1(c); |
| // Character is outside Latin-1 completely |
| if (converted == 0) return set_replacement(nullptr); |
| // Convert quark to Latin-1 in place. |
| uint16_t* copy = const_cast<uint16_t*>(quarks.start()); |
| copy[j] = converted; |
| } |
| } else { |
| DCHECK(elm.text_type() == TextElement::CHAR_CLASS); |
| RegExpCharacterClass* cc = elm.char_class(); |
| ZoneList<CharacterRange>* ranges = cc->ranges(zone()); |
| CharacterRange::Canonicalize(ranges); |
| // Now they are in order so we only need to look at the first. |
| int range_count = ranges->length(); |
| if (cc->is_negated()) { |
| if (range_count != 0 && |
| ranges->at(0).from() == 0 && |
| ranges->at(0).to() >= String::kMaxOneByteCharCode) { |
| // This will be handled in a later filter. |
| if (IgnoreCase(cc->flags()) && RangesContainLatin1Equivalents(ranges)) |
| continue; |
| return set_replacement(nullptr); |
| } |
| } else { |
| if (range_count == 0 || |
| ranges->at(0).from() > String::kMaxOneByteCharCode) { |
| // This will be handled in a later filter. |
| if (IgnoreCase(cc->flags()) && RangesContainLatin1Equivalents(ranges)) |
| continue; |
| return set_replacement(nullptr); |
| } |
| } |
| } |
| } |
| return FilterSuccessor(depth - 1); |
| } |
| |
| RegExpNode* LoopChoiceNode::FilterOneByte(int depth) { |
| if (info()->replacement_calculated) return replacement(); |
| if (depth < 0) return this; |
| if (info()->visited) return this; |
| { |
| VisitMarker marker(info()); |
| |
| RegExpNode* continue_replacement = continue_node_->FilterOneByte(depth - 1); |
| // If we can't continue after the loop then there is no sense in doing the |
| // loop. |
| if (continue_replacement == nullptr) return set_replacement(nullptr); |
| } |
| |
| return ChoiceNode::FilterOneByte(depth - 1); |
| } |
| |
| RegExpNode* ChoiceNode::FilterOneByte(int depth) { |
| if (info()->replacement_calculated) return replacement(); |
| if (depth < 0) return this; |
| if (info()->visited) return this; |
| VisitMarker marker(info()); |
| int choice_count = alternatives_->length(); |
| |
| for (int i = 0; i < choice_count; i++) { |
| GuardedAlternative alternative = alternatives_->at(i); |
| if (alternative.guards() != nullptr && |
| alternative.guards()->length() != 0) { |
| set_replacement(this); |
| return this; |
| } |
| } |
| |
| int surviving = 0; |
| RegExpNode* survivor = nullptr; |
| for (int i = 0; i < choice_count; i++) { |
| GuardedAlternative alternative = alternatives_->at(i); |
| RegExpNode* replacement = alternative.node()->FilterOneByte(depth - 1); |
| DCHECK(replacement != this); // No missing EMPTY_MATCH_CHECK. |
| if (replacement != nullptr) { |
| alternatives_->at(i).set_node(replacement); |
| surviving++; |
| survivor = replacement; |
| } |
| } |
|   if (surviving < 2) return set_replacement(survivor); |
|