Skip to content

Instantly share code, notes, and snippets.

View zhiqiu's full-sized avatar
👋
hi

Leo Chen zhiqiu

👋
hi
View GitHub Profile
import re
import sys
# 定义一个函数来解析日志文件并统计最大值和最小值
def parse_and_statistics(log_file):
# 初始化一个字典来存储每项的最大值和最小值
stats = {}
# 逐行读取日志文件
with open(log_file, 'r') as file:
@zhiqiu
zhiqiu / git.md
Last active July 15, 2025 07:45
多用户配置git

假设git用户userA和userB使用同一个机器,如何配置git,避免互相冲突呢?

  • 用户A的repo:~/user/userA/repo1
  • 用户B的repo:~/user/userB/repo2
  1. 配置自定义 gitconfig,参考:https://git-scm.com/docs/git-config#_conditional_includes
  • 在~/.gitconfig中添加下面的内容,其中 ~/user/userA/表示在这个目录及其子目录中的git repo,都会使用自定义config文件~/.gitconfig-a
[includeIf "gitdir:~/user/userA/"]
 path = ~/.gitconfig-a
@zhiqiu
zhiqiu / plot.py
Created August 13, 2024 05:24
plot loss curve and the diff of two log
from matplotlib import pyplot as plt
import numpy as np
import warnings
def parse_file(file_name):
speeds = []
losses = []
idxs = []
idx = 0
@zhiqiu
zhiqiu / demo.py
Last active August 26, 2024 09:02
a demo of dp-mp-pp hybrid with auto parallel
# 启动脚本: python3 -m paddle.distributed.launch --device=0,1,2,3,4,5,6,7 demo.py
import paddle
import paddle.distributed as dist
from paddle.io import BatchSampler, DataLoader, Dataset
import numpy as np
import os
# Two 2x2 device meshes over 8 GPUs (devices 0-3 and 4-7), each with named
# axes 'x' and 'y' — presumably used to shard the model across pipeline
# stages (mesh0 vs mesh1) with dp/mp inside each mesh; TODO confirm against
# the rest of the script (not visible here).
mesh0 = dist.ProcessMesh([[0, 1], [2, 3]], dim_names=['x', 'y'])
mesh1 = dist.ProcessMesh([[4, 5], [6, 7]], dim_names=['x', 'y'])
@zhiqiu
zhiqiu / llvm.patch
Created February 6, 2023 07:30
patch for llvm.cmake of CINN in manylinux docker
diff --git a/llvm/CMakeLists.txt b/llvm/CMakeLists.txt
index ddf807156327..408307082684 100644
--- a/llvm/CMakeLists.txt
+++ b/llvm/CMakeLists.txt
@@ -48,7 +48,7 @@ endif()
project(LLVM
VERSION ${LLVM_VERSION_MAJOR}.${LLVM_VERSION_MINOR}.${LLVM_VERSION_PATCH}
- LANGUAGES C CXX ASM)
+ LANGUAGES CXX C ASM)
@zhiqiu
zhiqiu / op_desc.json
Created August 18, 2020 05:56
op desc of paddle
{
"abs": {
"Attrs": {
"op_callstack": {
"default_value": [],
"generated": false,
"type": 5
},
"op_device": {
"default_value": "",
@zhiqiu
zhiqiu / gen_layers.py
Created July 29, 2020 11:22
auto-generated python layer
import paddle
from paddle.fluid.layer_helper import LayerHelper
from paddle.fluid.framework import in_dygraph_mode
from paddle.fluid import core
from paddle.fluid.layers.layer_function_generator import autodoc, templatedoc, _generate_doc_string_
from paddle.fluid.data_feeder import convert_dtype, check_variable_and_dtype, check_type, check_dtype
#!/bin/bash
# NOTE: original shebang was "#!bin/bash" (missing leading slash), which is
# not a valid interpreter path; fixed to /bin/bash.
# set -xe
# Guard: require at least 3 positional arguments, otherwise print usage and exit.
if [ $# -lt 3 ]; then
echo "Usage: "
echo " CUDA_VISIBLE_DEVICES=0 bash run.sh train|infer speed|mem|maxbs sp|mp /ssd3/benchmark_results/cwh/logs"
exit
fi
import paddle
# 所有注册op
op_protos = paddle.fluid.framework.get_all_op_protos()
all_registered_ops = [op.type for op in op_protos]
#print('all_registered_ops', len(all_registered_ops), all_registered_ops)
# 无grad的前向op
no_grad_ops = []
# 有grad的前向op
import paddle.fluid as fluid
import numpy as np
shape = [32,32,32]
a = np.random.random(size=shape).astype('float32')
p = a.__array_interface__['data'][0]
# if p is 32 aligned, all following array_equal is True
# refs: https://eigen.tuxfamily.org/bz/show_bug.cgi?id=1728
print(p, hex(p), p%32)
b = a.copy()