本文算法只是翻阅 bytedance.sonic-cpp 过程中的简单折腾,暂时没有实际意义。

基本的跳转判断

#include <cstring>
#include <cstdint>

// ~std::bit_cast
// ~std::bit_cast
// Loads 4 bytes from `src` via memcpy (alignment- and strict-aliasing-safe)
// and compares them, as a native-endian uint32_t, against `target`.
bool EqBytes4(const char *src, uint32_t target) {
    uint32_t val;
    std::memcpy(&val, src, sizeof(uint32_t));
    return val == target;
}

// Skips over a JSON literal ("true" / "null" / "false") starting at data[pos].
// `token` is the byte at data[pos] and selects which literal to test for.
// On success `pos` advances past the literal and true is returned; otherwise
// `pos` is left untouched and false is returned.
bool skip_literal_v1(const char *data, size_t &pos,
                  size_t len, uint8_t token) {
    // Literal bytes as they appear in a little-endian 4-byte load.
    static constexpr uint32_t kNullBin = 0x6c6c756e;  // "null"
    static constexpr uint32_t kTrueBin = 0x65757274;  // "true"
    static constexpr uint32_t kFalseBin =
        0x65736c61;  // the binary of 'alse' in false
    auto start = data + pos;
    // NOTE(review): end is data + len + 1, so the `<` checks below really mean
    // "at least 4 (resp. 5) readable bytes remain". Forming data + len + 1 is
    // out-of-range pointer arithmetic unless the buffer carries a spare byte —
    // presumably inherited from sonic-cpp's padded buffers; confirm.
    auto end = data + len + 1;
    switch (token) {
      case 't':
        if (start + 4 < end && EqBytes4(start, kTrueBin)) {
          pos += 4;
          return true;
        };
        break;
      case 'n':
        if (start + 4 < end && EqBytes4(start, kNullBin)) {
          pos += 4;
          return true;
        };
        break;
      case 'f':
        // Only "alse" is compared; the leading 'f' is implied by token == 'f'.
        if (start + 5 < end && EqBytes4(start + 1, kFalseBin)) {
          pos += 5;
          return true;
        }
    }
    return false;
}

这是 sonic-cpp 目前的 parse lazy 实现,就是当你的 JSON 遍历到 't'/'n'/'f' 时,判断是否能跳过,即不做解析(skip literal)。

EqBytes4(char const*, unsigned int):
        cmpl    %esi, (%rdi)
        sete    %al
        ret
skip_literal_v1(char const*, unsigned long&, unsigned long, unsigned char):
        movq    %rdx, %r8
        movq    (%rsi), %rdx
        movq    %rsi, %rax
        leaq    (%rdi,%rdx), %rsi
        leaq    1(%rdi,%r8), %rdi
        cmpb    $110, %cl           // 这里的判断顺序是 'n'->'t'->'f'
        je      .L4                       // 以 L4 为例,见下方注释
        cmpb    $116, %cl
        je      .L5
        cmpb    $102, %cl
        je      .L6
        xorl    %ecx, %ecx
.L3:
        movl    %ecx, %eax
        ret
.L5:
        leaq    4(%rsi), %r8
        xorl    %ecx, %ecx
        cmpq    %rdi, %r8
        jnb     .L3
        cmpl    $1702195828, (%rsi)
        jne     .L3
.L15:
        addq    $4, %rdx
        movq    %rdx, (%rax)
.L8:
        movl    $1, %ecx
        jmp     .L3
.L4:
        leaq    4(%rsi), %r8        // 判断范围
        xorl    %ecx, %ecx
        cmpq    %rdi, %r8
        jnb     .L3
        cmpl    $1819047278, (%rsi) // 判断 "null"
        jne     .L3                 // 失败则准备 %rax=false,返回
        jmp     .L15                // 成功则更新 pos,%rax=true,返回
.L6:
        leaq    5(%rsi), %r8
        xorl    %ecx, %ecx
        cmpq    %rdi, %r8
        jnb     .L3
        cmpl    $1702063201, 1(%rsi)
        jne     .L3
        addq    $5, %rdx
        movq    %rdx, (%rax)
        jmp     .L8

g++-14 -O3 生成上述汇编指令,可以看出即使是 O3 也是很生硬的照搬算法。一个显眼的问题是它没有优化掉 token 判断和长度判断的依赖,我们可以在 v2 算法中把它扣掉,并减少分支。

优化的跳转判断

#include <cstring>
#include <cstdint>

// ~std::bit_cast
// ~std::bit_cast
// Loads 4 bytes from `src` via memcpy (alignment- and strict-aliasing-safe)
// and compares them, as a native-endian uint32_t, against `target`.
bool EqBytes4(const char *src, uint32_t target) {
    uint32_t val;
    std::memcpy(&val, src, sizeof(uint32_t));
    return val == target;
}

// Branch-reduced variant of skip_literal_v1: instead of dispatching on
// `token`, it speculatively assumes the common case of >= 5 readable bytes
// and compares the input bytes directly against all three literals.
bool skip_literal_v2(const char *data, size_t &pos,
                     size_t len, uint8_t token) {
  (void) token;  // unused: the byte comparisons below imply the token
  static constexpr uint32_t kNullBin = 0x6c6c756e;  // "null" (little-endian load)
  static constexpr uint32_t kTrueBin = 0x65757274;  // "true" (little-endian load)
  static constexpr uint32_t kAlseBin = 0x65736c61;  // the binary of 'alse' in false
  static constexpr uint32_t kFalsBin = 0x736c6166;  // the binary of 'fals' in false

  auto start = data + pos;
  // NOTE(review): as in v1, end = data + len + 1 turns `<` into an
  // "at least N bytes remain" test; forming that pointer assumes a buffer
  // with a spare byte of slack — confirm against the callers.
  auto end = data + len + 1;
  if (start + 5 < end) {
      if (EqBytes4(start, kNullBin) || EqBytes4(start, kTrueBin)) {
          pos += 4;
          return true;
      }
      if (EqBytes4(start, kFalsBin) && EqBytes4(start + 1, kAlseBin)) {
          pos += 5;
          return true;
      }
  // slow path
  } else if (start + 4 < end) {
      // Exactly 4 bytes remain: only the 4-byte literals can still match.
      if (EqBytes4(start, kNullBin) || EqBytes4(start, kTrueBin)) {
          pos += 4;
          return true;
      }
  }
  return false;
}

这种写法就是尝试让编译器生成更聪明一点的指令,直接看指令吧。

EqBytes4(char const*, unsigned int):
        cmpl    %esi, (%rdi)
        sete    %al
        ret
skip_literal_v2(char const*, unsigned long&, unsigned long, unsigned char):
        movq    %rsi, %rcx          // token 不再占用寄存器 %rcx,实际复用 pos 地址
        movq    (%rsi), %rsi        // %rsi 作为 pos 值
        movq    %rdx, %rax
        leaq    (%rdi,%rsi), %rdx   // 后续用于 end 判断
        leaq    1(%rdi,%rax), %rdi
        leaq    5(%rdx), %rax
        cmpq    %rdi, %rax          // start + 5 < end
        jnb     .L4                 // 慢路径
        movl    (%rdx), %edi
        cmpl    $1702195828, %edi   // 更少的 jcc 分支,null 和 true 并行判断
        sete    %al
        cmpl    $1819047278, %edi
        sete    %r8b
        orb     %r8b, %al
        jne     .L12                // 没问题就 pos+4,并返回
        cmpl    $1936482662, %edi   // fals 判断
        je      .L13                // 后续再尝试判断 alse
.L3:
        ret
.L4:                                // 慢路径基本是重复的操作,失败惩罚是会加重的
        leaq    4(%rdx), %r8
        xorl    %eax, %eax
        cmpq    %rdi, %r8
        jnb     .L3
        movl    (%rdx), %edx
        cmpl    $1702195828, %edx
        sete    %al
        cmpl    $1819047278, %edx
        sete    %dl
        orb     %dl, %al
        je      .L3
.L12:
        addq    $4, %rsi
        movq    %rsi, (%rcx)
.L6:
        movl    $1, %eax
        ret
.L13:
        cmpl    $1702063201, 1(%rdx)
        jne     .L3
        addq    $5, %rsi
        movq    %rsi, (%rcx)
        jmp     .L6

g++-14 -O3 生成上述汇编指令,简单来说通过猜测常规 JSON(快路径)一般能保证大于等于 5 字符,从而避免先判断 token 再根据长度选择分支的做法。指令上也确实看着更加「直」了一点。

基准测试

Google Benchmark 测试代码
#include <cstring>
#include <cstdint>
#include <algorithm>
#include <ranges>
#include <string_view>
#include <array>
#include <string>
#include <iostream>
#include <vector>
#include <random>
#include <ranges>
#include <benchmark/benchmark.h>

// Builds the benchmark corpus: `size` tokens (rounded down to a multiple of
// four) drawn in equal proportion from "null" / "true" / "false" and the
// deliberately unmatchable "t", shuffled with a fixed seed so that every run
// sees the same byte stream.
auto make(size_t size) {
    constexpr auto elements = std::array<std::string_view, 4> {
        "null", "true", "false",
        "t" // Unexpected value.
    };
    const size_t per_kind = size / elements.size();
    const size_t total = per_kind * elements.size();
    std::vector<size_t> order;
    order.reserve(total);
    for (size_t idx = 0; idx < total; ++idx) {
        order.push_back(idx % elements.size());
    }
    std::mt19937 rng{19260817};
    std::shuffle(order.begin(), order.end(), rng);
    std::string corpus;
    for (const size_t which : order) {
        corpus.append(elements[which]);
    }
    return corpus;
}

// ~std::bit_cast
// ~std::bit_cast
// True iff the 4 bytes at `src` equal the object representation of `target`
// (byte-wise comparison is equivalent to memcpy-into-uint32 then ==).
bool EqBytes4(const char *src, uint32_t target) {
    return std::memcmp(src, &target, sizeof(target)) == 0;
}

// v1: token-dispatched literal skip (the exact code that produced the
// benchmark numbers below — kept byte-for-byte). Returns true and advances
// `pos` past "true"/"null"/"false"; otherwise leaves `pos` untouched.
bool skip_literal_v1(const char *data, size_t &pos,
                  size_t len, uint8_t token) {
    // Literal bytes as they appear in a little-endian 4-byte load.
    static constexpr uint32_t kNullBin = 0x6c6c756e;  // "null"
    static constexpr uint32_t kTrueBin = 0x65757274;  // "true"
    static constexpr uint32_t kFalseBin =
        0x65736c61;  // the binary of 'alse' in false
    auto start = data + pos;
    // NOTE(review): end = data + len + 1 makes `<` mean "at least N bytes
    // remain"; forming that pointer assumes a byte of buffer slack — confirm.
    auto end = data + len + 1;
    switch (token) {
      case 't':
        if (start + 4 < end && EqBytes4(start, kTrueBin)) {
          pos += 4;
          return true;
        };
        break;
      case 'n':
        if (start + 4 < end && EqBytes4(start, kNullBin)) {
          pos += 4;
          return true;
        };
        break;
      case 'f':
        // Only "alse" is compared; the leading 'f' is implied by token == 'f'.
        if (start + 5 < end && EqBytes4(start + 1, kFalseBin)) {
          pos += 5;
          return true;
        }
    }
    return false;
}

// v2: branch-reduced literal skip (the exact code that produced the benchmark
// numbers below — kept byte-for-byte). Assumes the common case of >= 5
// readable bytes and compares against all three literals without dispatching
// on `token`.
bool skip_literal_v2(const char *data, size_t &pos,
                     size_t len, uint8_t token) {
  (void) token;  // unused: the byte comparisons below imply the token
  static constexpr uint32_t kNullBin = 0x6c6c756e;  // "null" (little-endian load)
  static constexpr uint32_t kTrueBin = 0x65757274;  // "true" (little-endian load)
  static constexpr uint32_t kAlseBin = 0x65736c61;  // the binary of 'alse' in false
  static constexpr uint32_t kFalsBin = 0x736c6166;  // the binary of 'fals' in false

  auto start = data + pos;
  // NOTE(review): end = data + len + 1 makes `<` mean "at least N bytes
  // remain"; forming that pointer assumes a byte of buffer slack — confirm.
  auto end = data + len + 1;
  if (start + 5 < end) {
      if (EqBytes4(start, kNullBin) || EqBytes4(start, kTrueBin)) {
          pos += 4;
          return true;
      }
      if (EqBytes4(start, kFalsBin) && EqBytes4(start + 1, kAlseBin)) {
          pos += 5;
          return true;
      }
  // slow path
  } else if (start + 4 < end) {
      // Exactly 4 bytes remain: only the 4-byte literals can still match.
      if (EqBytes4(start, kNullBin) || EqBytes4(start, kTrueBin)) {
          pos += 4;
          return true;
      }
  }
  return false;
}

// Drives one benchmark run: repeatedly scans the prepared token stream with
// F (a skip_literal implementation). On a match F itself advances the cursor
// past the literal; on a miss we step forward one byte (the "t" placeholders
// exercise the failure path).
template <auto F>
void benchmark_template(auto &state, size_t nr_token) {
    std::string corpus = make(nr_token);
    benchmark::DoNotOptimize(corpus);
    for (auto _ : state) {
        size_t offset = 0;
        while (offset < corpus.length()) {
            bool matched = F(corpus.data(), offset,
                             corpus.length(), corpus[offset]);
            benchmark::DoNotOptimize(matched);
            if (!matched) ++offset;
        }
    }
}

// static std::string simple = make(12);
// template <auto F>
// void test() {
//     int ret = 0;
//     for(size_t cur = 0; cur < simple.length();) {
//         auto result = F(simple.data(), cur, simple.length(), simple[cur]);
//         if(!result) cur++;
//         else ret++;
//     }
//     std::cout << ret << std::endl;
// }
// int main() {
//     std::cout << simple << std::endl;
//     test<skip_literal_v1>();
//     test<skip_literal_v2>();
// }


// Benchmark entry: v1 (switch-on-token) skipper over state.range(0) tokens.
void BM_skip_literal_v1(benchmark::State& state) {
    benchmark_template<skip_literal_v1>(state, state.range(0));
}

// Benchmark entry: v2 (branch-reduced) skipper over state.range(0) tokens.
void BM_skip_literal_v2(benchmark::State& state) {
    benchmark_template<skip_literal_v2>(state, state.range(0));
}

// Sweep corpus sizes from 8 up to 2^26 tokens (Range expands the endpoints
// geometrically with Benchmark's default multiplier).
BENCHMARK(BM_skip_literal_v1)->Range(8, 1<<26);
BENCHMARK(BM_skip_literal_v2)->Range(8, 1<<26);

BENCHMARK_MAIN();

测试代码加入了一定的失败匹配 "t" 占位符。

理论收益

// v1 的 topdown 报告
$ perf stat -M PipelineL1
 Performance counter stats for './a.out':

    74,722,674,738      de_src_op_disp.all               #      5.8 %  bad_speculation          (50.01%)
    29,557,874,567      ls_not_halted_cyc                #     36.3 %  retiring                 (50.01%)
    64,382,628,404      ex_ret_ops                                                              (50.01%)
        87,109,879      de_no_dispatch_per_slot.smt_contention #      0.0 %  smt_contention           (50.00%)
    29,522,675,914      ls_not_halted_cyc                                                       (50.00%)
    99,571,510,280      de_no_dispatch_per_slot.no_ops_from_frontend #     56.2 %  frontend_bound           (49.99%)
    29,508,518,726      ls_not_halted_cyc                                                       (49.99%)
     2,964,589,640      de_no_dispatch_per_slot.backend_stalls #      1.7 %  backend_bound            (75.00%)
    29,528,187,809      ls_not_halted_cyc                                                       (75.00%)

// v2 的 topdown 报告
$ perf stat -M PipelineL1
 Performance counter stats for './a.out':

   120,682,363,513      de_src_op_disp.all               #      6.9 %  bad_speculation          (50.00%)
    35,017,028,479      ls_not_halted_cyc                #     50.6 %  retiring                 (50.00%)
   106,269,993,747      ex_ret_ops                                                              (50.00%)
       172,887,357      de_no_dispatch_per_slot.smt_contention #      0.1 %  smt_contention           (49.99%)
    35,004,281,404      ls_not_halted_cyc                                                       (49.99%)
    74,874,655,398      de_no_dispatch_per_slot.no_ops_from_frontend #     35.7 %  frontend_bound           (50.00%)
    34,996,749,676      ls_not_halted_cyc                                                       (50.00%)
    14,415,385,833      de_no_dispatch_per_slot.backend_stalls #      6.9 %  backend_bound            (75.01%)
    35,004,379,924      ls_not_halted_cyc                                                       (75.01%)

perf 进行 topdown 分析给出的理论收益是不错的,v2 版本的有效退役率高了很多(从 36.3% 提升到了 50.6%),并且去掉了 v1 的前端解码瓶颈。

实际收益

----------------------------------------------------------------------
Benchmark                            Time             CPU   Iterations
----------------------------------------------------------------------
BM_skip_literal_v1/8              6.95 ns         6.95 ns     95820402
BM_skip_literal_v1/64             53.6 ns         53.6 ns     12656203
BM_skip_literal_v1/512             574 ns          574 ns      1211370
BM_skip_literal_v1/4096           4794 ns         4794 ns       149035
BM_skip_literal_v1/32768         40615 ns        40616 ns        17224
BM_skip_literal_v1/262144      1332996 ns      1333055 ns          526
BM_skip_literal_v1/2097152    10670524 ns     10669860 ns           66
BM_skip_literal_v1/16777216   84773007 ns     84757464 ns            8
BM_skip_literal_v1/67108864  336120531 ns    336080475 ns            2
BM_skip_literal_v2/8              8.61 ns         8.61 ns     78270109
BM_skip_literal_v2/64             66.6 ns         66.6 ns     10225935
BM_skip_literal_v2/512             541 ns          541 ns      1255752
BM_skip_literal_v2/4096           4680 ns         4679 ns       153072
BM_skip_literal_v2/32768         38968 ns        38967 ns        18201
BM_skip_literal_v2/262144      1244033 ns      1243944 ns          556
BM_skip_literal_v2/2097152    10154790 ns     10154508 ns           69
BM_skip_literal_v2/16777216   80408271 ns     80404233 ns            9
BM_skip_literal_v2/67108864  324939614 ns    324948572 ns            2

但是实际收益挺悲剧的,只有长 JSON(size > 512)才有一点点加速。

正经人谁用大串还高强度 true/false 啊。

THE END

只是写着玩一下,实际意义不大。硬要说的话,v2 确实会快。

但是我觉得人类面向编译器的情况,其实和面向 AI 没区别:因为对分支跳转做调优和做玄学 prompt 一模一样,都是使用可调参数给巨型的黑盒生成最终产物。如果使用 Clang 跑一遍上面代码,v2 还会全面开倒车:因为编译器会不可控地生成不合场景的垃圾指令。