PaddlePaddle AiStudio Torch 配置 百度训练平台

PaddlePaddle AiStudio 平台提供 V100 GPU,然而默认只有 PaddlePaddle 百度自有框架,Conda 环境不独立,使用 external-libraries 文件夹存在路径缺陷,所以这里记录改环境到 Torch 的过程,包含一些语音处理包的安装。

目标环境:Python 3.11 + Torch 2.5.1 cu118

阅读全文 »

GTNH 传送权限单独指定

起因

在 GTNH 2.7.2 上,对于 Journey Map 传送点的支持需要 op 管理员权限,给管理员权限容易刷物件和其他意外,所以需要限制权限同时又能方便移动。

阅读全文 »

Linux dGPU Nvidia 独显运行应用

在 Ubuntu 22.04 上

系统带有工具 switcherooctl 可以切换系统使用的 gpu 卡,若默认连接显示到集成显卡,使用此工具时,将使用独显运行应用

若无法运行,可以使用 vkcube 来检查 Vulkan 的可用性,工具来自于包 vulkan-tools 直接 apt 安装即可。

可能出现无法调用情况如下:

vkEnumerateInstanceExtensionProperties failed to find the VK_KHR_surface extension. Do you have a compatible Vulkan installable client driver (ICD) installed? Please look at the Getting Started guide for additional information.

此时需要检查 icd 文件是否可用

ls /usr/share/vulkan/icd.d/nvidia_icd.json

若无文件,则需要安装库

sudo apt install vulkan-tools libvulkan1 libvulkan1:i386

若安装后仍不可用,检查是否有 Nvidia-wrapper

$ ls /usr/share/vulkan/icd.d/ 
intel_hasvk_icd.i686.json intel_icd.x86_64.json nvidia_icd.disabled_by_nv_vulkan_wrapper radeon_icd.x86_64.json intel_hasvk_icd.x86_64.json lvp_icd.i686.json nv_vulkan_wrapper.json virtio_icd.i686.json intel_icd.i686.json lvp_icd.x86_64.json radeon_icd.i686.json virtio_icd.x86_64.json

恢复配置:

sudo mv /usr/share/vulkan/icd.d/nvidia_icd.disabled_by_nv_vulkan_wrapper /usr/share/vulkan/icd.d/nvidia_icd.json

此时再切换独显运行可行。

注意,我并未发现哪部分程序给出了 nv_vulkan_wrapper,若可行,从添加 wrapper 的程序关闭可能更合理

使用环境变量运行 windows 软件

在解决 icd 问题后,环境变量也可以用作切换运行的方式,毕竟 switcherooctl launch 的原理本质也是切换环境变量。

例如使用 Steam 的 Proton 9.0-4 运行 Windows 应用,独显启动:

先要确保手动在 Steam 中添加可执行文件到游戏库中,并在命令行启动的 Steam 上获取其编号,或者在集显运行时查看其环境变量中的 SteamGameId

之后,为应用添加必要的环境变量,无需启动 Steam 即可运行应用本体

注意,最好替换 SteamGameId ,这是 Steam 做运行环境隔离的措施,使用程序自己的环境运行更稳定

此外,一定要替换 proton 的运行路径,我的路径只是 Proton 9.0 配置,随着版本更新,指令会有所不同。

此指令从 ps -aux | grep proton 获取,对于后续 Steam 启动 proton 的指令,可以通过如上方案获取启动指令模版。

__NV_PRIME_RENDER_OFFLOAD=1 __GLX_VENDOR_LIBRARY_NAME=nvidia __VK_LAYER_NV_optimus=NVIDIA_only VK_ICD_FILENAMES=/usr/share/vulkan/icd.d/nvidia_icd.json SteamGameId=2907931734 STEAM_COMPAT_DATA_PATH=$HOME/.local/share/Steam/steamapps/compatdata/$SteamGameId STEAM_COMPAT_CLIENT_INSTALL_PATH=~/.local/share/Steam/ python3 "$HOME/.local/share/Steam/steamapps/common/Proton 9.0 (Beta)/proton" waitforexitandrun "/media/zsig/生活日常/Games/Grand Theft Auto V/GTA5.exe"

Wine 运行

Wine + dxvk 手动运行也可以,例如使用 jadeite 启动 Unity 程序

DXVK_HUD="fps,frametimes,version,gpuload" GST_PLUGIN_PATH="" HOST_LANG="zh_CN.UTF-8" HOST_LC_ALL="zh_CN.UTF-8" LANG="zh_CN.UTF-8" LC_ALL="zh_CN.UTF-8" LD_LIBRARY_PATH="/home/zsig/.var/app/moe.launcher.the-honkers-railway-launcher/data/honkers-railway-launcher/runners/wine-10.1-staging-tkg-amd64/lib:/home/zsig/.var/app/moe.launcher.the-honkers-railway-launcher/data/honkers-railway-launcher/runners/wine-10.1-staging-tkg-amd64/lib/wine/x86_64-unix:/home/zsig/.var/app/moe.launcher.the-honkers-railway-launcher/data/honkers-railway-launcher/runners/wine-10.1-staging-tkg-amd64/lib/wine/i386-unix" WINEARCH="win64" WINEFSYNC="1" WINEPREFIX="/home/zsig/.var/app/moe.launcher.the-honkers-railway-launcher/data/honkers-railway-launcher/prefix" WINE_FULLSCREEN_FSR="1" WINE_FULLSCREEN_FSR_MODE="balanced" WINE_FULLSCREEN_FSR_STRENGTH="2" JADEITE_ALLOW_UNKNOWN=1 bash -c "'/home/zsig/.var/app/moe.launcher.the-honkers-railway-launcher/data/honkers-railway-launcher/runners/wine-10.1-staging-tkg-amd64/bin/wine64'  '/home/zsig/.var/app/moe.launcher.the-honkers-railway-launcher/data/honkers-railway-launcher/patch/jadeite.exe' '/home/zsig/Documents/SkyLines/Cities - Skylines II/Cities2.exe' -- "

MPU6050 参数读取 Jetson

编译动态链接库

I2CDevLib仓库
选用Linux上驱动I2C和MPU6050的代码,克隆LinuxI2CDev文件夹到本地,然后进入到文件夹中,创建一个main.cpp用来创建与Python的函数接口,可以自定义。这里的代码没有考虑零偏,只是从DMP取出四元数换算得到结果的,实际用的时候有不小的零偏,可以添加上初始化时的零偏纠正过程。

#include <stdio.h>  
#include "MPU6050/MPU6050_6Axis_MotionApps20.h"

extern "C" {
    // Global device handle and init flag shared by the exported C functions.
    // The wrapper is consumed from Python via ctypes, hence the C linkage.
    MPU6050 mpu;
    bool dmp_initialized = false;

    // Initialize the MPU6050 and its on-chip DMP.
    // Must be called once before get_yaw_pitch_roll(); failures are reported
    // on stdout and leave dmp_initialized false.
    void initialize_dmp() {
        // Wake up the MPU6050 over I2C.
        mpu.initialize();

        // Verify the device responds on the bus.
        if (!mpu.testConnection()) {
            printf("MPU6050 connection failed\n");
            return;
        }

        // Load the DMP firmware; non-zero return code means failure.
        if (mpu.dmpInitialize() != 0) {
            printf("DMP initialization failed\n");
            return;
        }

        // Start the DMP so FIFO packets begin to be produced.
        mpu.setDMPEnabled(true);

        dmp_initialized = true;
    }

    // Read the latest DMP FIFO packet and write yaw/pitch/roll to the three
    // output pointers. The outputs are left untouched when the DMP has not
    // been initialized or no packet is currently available.
    void get_yaw_pitch_roll(float *yaw, float *pitch, float *roll) {
        if (!dmp_initialized) {
            printf("DMP not initialized\n");
            return;
        }

        // 64 bytes covers the DMP packet size used by MotionApps 2.0.
        uint8_t fifoBuffer[64];

        // Only read when a full packet is waiting in the FIFO.
        if (mpu.dmpPacketAvailable()) {
            mpu.dmpGetCurrentFIFOPacket(fifoBuffer);

            // Quaternion extracted from the packet.
            Quaternion q;
            mpu.dmpGetQuaternion(&q, fifoBuffer);

            // Gravity vector derived from the quaternion.
            VectorFloat gravity;
            mpu.dmpGetGravity(&gravity, &q);

            // Convert quaternion + gravity into yaw/pitch/roll angles.
            float ypr[3];
            mpu.dmpGetYawPitchRoll(ypr, &q, &gravity);

            // Copy the results to the caller's buffers.
            *yaw = ypr[0];
            *pitch = ypr[1];
            *roll = ypr[2];
        }
    }
}

配置CmakeLists.txt,当然,在配置成动态链接库之前,可以编译成可执行程序验证正确性。

cmake_minimum_required(VERSION 3.10)

# Project name
project(MPU6050_Project)

# C++ standard
set(CMAKE_CXX_STANDARD 11)
set(CMAKE_CXX_STANDARD_REQUIRED True)

# Build all objects with -fPIC so the static libs can be linked
# into the shared library below
set(CMAKE_POSITION_INDEPENDENT_CODE ON)

# I2Cdev driver library
add_library(I2Cdev I2Cdev/I2Cdev.cpp)
target_include_directories(I2Cdev PUBLIC I2Cdev)

# MPU6050 driver library (including the DMP MotionApps code)
add_library(MPU6050 MPU6050/MPU6050.cpp MPU6050/MPU6050_6Axis_MotionApps20.cpp)
target_include_directories(MPU6050 PUBLIC MPU6050)

# MPU6050 headers use I2Cdev, so propagate the dependency to consumers
target_link_libraries(MPU6050 PUBLIC I2Cdev)

# Shared library exposing the C API consumed from Python via ctypes
add_library(mpu6050_lib SHARED main.cpp)
target_include_directories(mpu6050_lib PUBLIC .)

# Link the MPU6050 driver into the shared library
target_link_libraries(mpu6050_lib PRIVATE MPU6050)

创建build文件夹,进入build文件夹,执行cmake ..,然后make,即可看到libmpu6050_lib.so,记住路径,或者移动到python代码同目录下

调用和获取数据

使用如下代码,可以加载并自动获取yaw, pitch, roll角度。这里使用单独的线程,因为不连续读取时会出现错误数据,原因未知。

# Load the shared library built from the C++ wrapper.
lib = ctypes.CDLL('./cpp/libmpu6050_lib.so')

# Prototype for the DMP initialization function.
lib.initialize_dmp.argtypes = []
lib.initialize_dmp.restype = None

# Prototype for the angle-reading function (three float out-parameters).
lib.get_yaw_pitch_roll.argtypes = [ctypes.POINTER(ctypes.c_float), ctypes.POINTER(ctypes.c_float), ctypes.POINTER(ctypes.c_float)]
lib.get_yaw_pitch_roll.restype = None

# Initialize the DMP once at startup.
lib.initialize_dmp()

# Output buffers shared with the reader thread.
yaw = ctypes.c_float()
pitch = ctypes.c_float()
roll = ctypes.c_float()


lock = threading.Lock()  # guards concurrent access to the angle buffers
stop_event = threading.Event()  # set when the reader thread has exited


def yaw_read_thread():
    """Continuously poll yaw/pitch/roll from the DMP.

    Runs until stop_event is set (or the daemon thread is torn down);
    always sets stop_event on exit so consumers know the values are no
    longer being refreshed.
    """
    try:
        while not stop_event.is_set():
            with lock:
                lib.get_yaw_pitch_roll(ctypes.byref(yaw), ctypes.byref(pitch), ctypes.byref(roll))
    finally:
        stop_event.set()
        # print(f"Yaw: {yaw.value}, Pitch: {pitch.value}, Roll: {roll.value}")


# Start the background reader; daemon=True lets the process exit freely.
threading.Thread(target=yaw_read_thread, daemon=True).start()

蓝牙设备配置 HCI

启动蓝牙设备

检查RFkill列表,蓝牙hci设备是否被Block

rfkill list
$     rfkill list
0: bt_default: Bluetooth
Soft blocked: no
Hard blocked: no
1: phy0: Wireless LAN
Soft blocked: no
Hard blocked: no
2: brcmfmac-wifi: Wireless LAN
Soft blocked: no
Hard blocked: no
3: hci0: Bluetooth
Soft blocked: yes
Hard blocked: no

若blocked yes则

rfkill unblock 3

若需要恢复关闭状态节约电源,使用block命令即可

蓝牙设备属性配置

sudo hciconfig hci0 piscan
sudo hciconfig hci0 iscan

若有需要可以进入 bluetoothctl 管理,使用 discoverable on 与 pairable on 来开启蓝牙的可发现与可配对

蓝牙服务端配置

以L2CAP协议传输数据为例

import bluetooth


def l2cap_server():
    """Accept one L2CAP connection and print received messages until EOF.

    Binds to PSM 0x1001 on all local adapters; the client must connect
    with the same PSM. Sockets are always closed on exit.
    """
    server_sock = bluetooth.BluetoothSocket(bluetooth.L2CAP)
    psm = 0x1001  # L2CAP PSM (Protocol/Service Multiplexer)
    server_sock.bind(("", psm))
    server_sock.listen(1)

    print(f"Waiting for connection on L2CAP PSM {psm}")

    client_sock, client_info = server_sock.accept()
    print(f"Accepted connection from {client_info}")

    try:
        while True:
            data = client_sock.recv(1024)
            if not data:
                break  # peer closed the connection
            print(f"Received: {data.decode('utf-8')}")
    except IOError:
        pass
    finally:
        client_sock.close()
        server_sock.close()


if __name__ == "__main__":
    l2cap_server()

客户端在配对后连接

import bluetooth


def l2cap_client():
    """Connect to the L2CAP server and send user-typed messages.

    Reads lines from stdin and sends them UTF-8 encoded until interrupted
    with Ctrl-C; the socket is always closed on exit.
    """
    server_address = "XX:XX:XX:XX:XX:XX"  # replace with the server's Bluetooth address
    psm = 0x1001  # L2CAP PSM (Protocol/Service Multiplexer); must match the server

    sock = bluetooth.BluetoothSocket(bluetooth.L2CAP)
    sock.connect((server_address, psm))

    try:
        while True:
            data = input("Enter message to send: ")
            sock.send(data.encode('utf-8'))
    except KeyboardInterrupt:
        pass
    finally:
        sock.close()


if __name__ == "__main__":
    l2cap_client()

即可正常传输数据

Rknn 模型运行

前提:有转换好Onnx模型

安装Rknn-toolkit2

git clone https://github.com/airockchip/rknn-toolkit2.git --depth 1

应该有如下文件夹,RockChip合并了之前分散的项目,并且在此代码库更新新内容,旧仓库不再更新

├── rknn-toolkit2
├── rknn-toolkit-lite2
└── rknpu2

先要在个人主机(非板端),根据rknn-toolkit2/packages中的版本号来选择python版本,并且安装对应版本的rknn-toolkit2

注意,随着rknn-toolkit2的更新,版本铁定会不同,看好版本号

例如主机Ubuntu22.04.4(x86_64),packages文件夹内有

.
├── md5sum.txt
├── requirements_cp310-2.1.0.txt
├── requirements_cp311-2.1.0.txt
├── requirements_cp36-2.1.0.txt
├── requirements_cp37-2.1.0.txt
├── requirements_cp38-2.1.0.txt
├── requirements_cp39-2.1.0.txt
├── rknn_toolkit2-2.1.0+708089d1-cp310-cp310-linux_x86_64.whl
├── rknn_toolkit2-2.1.0+708089d1-cp311-cp311-linux_x86_64.whl
├── rknn_toolkit2-2.1.0+708089d1-cp36-cp36m-linux_x86_64.whl
├── rknn_toolkit2-2.1.0+708089d1-cp37-cp37m-linux_x86_64.whl
├── rknn_toolkit2-2.1.0+708089d1-cp38-cp38-linux_x86_64.whl
└── rknn_toolkit2-2.1.0+708089d1-cp39-cp39-linux_x86_64.whl

使用conda创建环境

conda create -n rknn python=3.10

激活环境

conda activate rknn

安装依赖和本体

pip install -r requirements_cp310-2.1.0.txt
pip install rknn_toolkit2-2.1.0+708089d1-cp310-cp310-linux_x86_64.whl

此时已安装完成rknn-toolkit2

转换模型

任意创建文件夹,例如

mkdir onnx2rknn
cd onnx2rknn

将转换代码写入到 onnx2rknn.py

from rknn.api import RKNN
import os

if __name__ == '__main__':
    # Target SoC for the converted model.
    platform = 'rk3588'

    '''step 1: create RKNN object'''
    rknn = RKNN()

    '''step 2: load the .onnx model'''
    rknn.config(target_platform=platform)
    print('--> Loading model')
    # Path of the source ONNX model.
    ret = rknn.load_onnx('gmstereo-scale1-sceneflow-124a438f_1x3x480x640_sim.onnx')
    if ret != 0:
        print('load model failed')
        exit(ret)
    print('done')

    '''step 3: building model'''
    print('-->Building model')
    ret = rknn.build(do_quantization=False)
    if ret != 0:
        print('build model failed')
        exit(ret)
    print('done')

    '''step 4: export and save the .rknn model'''
    RKNN_MODEL_PATH = 'unimatch_stereo_scale1_1x3x480x640_sim.rknn'  # output .rknn file name
    ret = rknn.export_rknn(RKNN_MODEL_PATH)
    if ret != 0:
        print('Export rknn model failed.')
        exit(ret)
    print('done')

    '''step 5: release the model'''
    rknn.release()

更改需要输入的onnx模型和输出rknn模型位置

板端rknn-toolkit-lite2安装

与主机端同理,到packages文件夹下,创建conda环境,安装指定包

.
├── rknn_toolkit_lite2-2.1.0-cp310-cp310-linux_aarch64.whl
├── rknn_toolkit_lite2-2.1.0-cp311-cp311-linux_aarch64.whl
├── rknn_toolkit_lite2-2.1.0-cp312-cp312-linux_aarch64.whl
├── rknn_toolkit_lite2-2.1.0-cp37-cp37m-linux_aarch64.whl
├── rknn_toolkit_lite2-2.1.0-cp38-cp38-linux_aarch64.whl
└── rknn_toolkit_lite2-2.1.0-cp39-cp39-linux_aarch64.whl

创建

conda create -n rknn python=3.10
conda activate rknn

安装rknn-toolkit-lite2和opencv

pip install rknn_toolkit_lite2-2.1.0-cp310-cp310-linux_aarch64.whl
pip install opencv-python

测试运行

cd ../examples/resnet18
python test.py

可能会报错

W rknn-toolkit-lite2 version: 2.1.0
--> Load RKNN model
done
--> Init runtime environment
I RKNN: [15:54:49.666] RKNN Runtime Information: librknnrt version: 1.4.0 (a10f100eb@2022-09-09T09:07:14)
I RKNN: [15:54:49.666] RKNN Driver Information: version: 0.9.3
E RKNN: [15:54:49.666] 6, 1
E RKNN: [15:54:49.666] Invalid RKNN model version 6
E RKNN: [15:54:49.666] rknn_init, load model failed!
E Catch exception when init runtime!
E Traceback (most recent call last):
File "/home/orangepi/miniconda3/envs/rknn/lib/python3.10/site-packages/rknnlite/api/rknn_lite.py", line 157, in init_runtime
self.rknn_runtime.build_graph(self.rknn_data, self.load_model_in_npu)
File "rknnlite/api/rknn_runtime.py", line 921, in rknnlite.api.rknn_runtime.RKNNRuntime.build_graph
Exception: RKNN init failed. error code: RKNN_ERR_FAIL

Init runtime environment failed

若出现同样的错误,那么应该Rknpu2的lib等在板端是古董级别的,需要更新下

首先到方才克隆的根目录中rknpu2/runtime文件夹下,复制需要的文件

sudo cp Linux/librknn_api/aarch64/librknnrt.so /usr/lib/librknnrt.so
sudo cp Linux/rknn_server/aarch64/usr/bin/rknn_server /usr/bin/rknn_server

另一个librknn_api.so不需要更新

然后应该能正常跑test.py了

W rknn-toolkit-lite2 version: 2.1.0
--> Load RKNN model
done
--> Init runtime environment
I RKNN: [16:06:56.773] RKNN Runtime Information, librknnrt version: 2.1.0 (967d001cc8@2024-08-07T19:28:19)
I RKNN: [16:06:56.773] RKNN Driver Information, version: 0.9.3
I RKNN: [16:06:56.773] RKNN Model Information, version: 6, toolkit version: 2.1.0+708089d1(compiler version: 2.1.0 (967d001cc8@2024-08-07T11:32:45)), target: RKNPU v2, target platform: rk3588, framework name: PyTorch, framework layout: NCHW, model inference type: static_shape
W RKNN: [16:06:56.787] query RKNN_QUERY_INPUT_DYNAMIC_RANGE error, rknn model is static shape type, please export rknn with dynamic_shapes
W Query dynamic range failed. Ret code: RKNN_ERR_MODEL_INVALID. (If it is a static shape RKNN model, please ignore the above warning message.)
done
--> Running model
resnet18
-----TOP 5-----
[812] score:0.999680 class:"space shuttle"
[404] score:0.000249 class:"airliner"
[657] score:0.000013 class:"missile"
[466] score:0.000009 class:"bullet train, bullet"
[895] score:0.000008 class:"warplane, military plane"

done

板端运行自定义rknn模型

示例代码如下

import time
import cv2
import numpy as np
import platform
from rknnlite.api import RKNNLite

# device tree for rk356x/rk3588
DEVICE_COMPATIBLE_NODE = '/proc/device-tree/compatible'

INPUT_SIZE = 224

# Path of the .rknn model produced by the conversion step.
RK3588_RKNN_MODEL = 'unimatch_stereo_scale1_1x3x480x640_sim.rknn'

IMAGENET_MEAN = np.array([0.485, 0.456, 0.406], dtype=np.float16)
IMAGENET_STD = np.array([0.229, 0.224, 0.225], dtype=np.float16)

left_image = './im0.png'
right_image = './im1.png'

output_path = 'output.png'

if __name__ == '__main__':

    # Model input resolution (matches the shape baked into the .rknn file).
    input_height = 480
    input_width = 640

    print(f"input_height={input_height}")
    print(f"input_width={input_width}")

    # Load the stereo pair, convert BGR->RGB, resize, and scale to [0, 1].
    left = cv2.resize(cv2.cvtColor(cv2.imread(left_image), cv2.COLOR_BGR2RGB), (input_width, input_height)).astype(
        np.float32) / 255.0
    right = cv2.resize(cv2.cvtColor(cv2.imread(right_image), cv2.COLOR_BGR2RGB), (input_width, input_height)).astype(
        np.float32) / 255.0

    # ImageNet normalization.
    left = (left - IMAGENET_MEAN) / IMAGENET_STD
    right = (right - IMAGENET_MEAN) / IMAGENET_STD

    # HWC -> NCHW.
    left = np.transpose(left, (2, 0, 1))[np.newaxis, :, :, :]
    right = np.transpose(right, (2, 0, 1))[np.newaxis, :, :, :]

    # RKNN Init
    rknn_model = RK3588_RKNN_MODEL
    rknn_lite = RKNNLite()

    # load RKNN model
    print('--> Load RKNN model')
    ret = rknn_lite.load_rknn(rknn_model)
    if ret != 0:
        print('Load RKNN model failed')
        exit(ret)
    print('done')

    # init runtime environment (pin inference to NPU core 0)
    print('--> Init runtime environment')
    ret = rknn_lite.init_runtime(core_mask=RKNNLite.NPU_CORE_0)

    if ret != 0:
        print('Init runtime environment failed')
        exit(ret)
    print('done')

    # Inference
    print('--> Running model')

    t = time.time()
    output = rknn_lite.inference(inputs=[left, right])
    dt = time.time() - t
    print(f"\033[34mElapsed: {dt:.3f} sec, {1 / dt:.3f} FPS\033[0m")

    # First output tensor, first batch element: the disparity map.
    disp = output[0][0]

    # Min-max normalize to 8-bit and save a color-mapped visualization.
    norm = ((disp - disp.min()) / (disp.max() - disp.min()) * 255).astype(np.uint8)
    colored = cv2.applyColorMap(norm, cv2.COLORMAP_PLASMA)
    cv2.imwrite(output_path, colored)
    print(f"\033[32moutput: {output_path}\033[0m")

    print('done')

    rknn_lite.release()

ps. 转换似乎有不少问题,推理不出来正确结果,不过流程确实是这样,或许是onnx模型用到了特殊的算子,转换有失误

私钥登录 Linux

在服务器端生成公钥与私钥

ssh-keygen -a 1000 -t ed25519

信任公钥

cat id_ed25519.pub >> authorized_keys

本地登录

拉取 id_ed25519 私钥文件到本地(没有 .pub 后缀的那个),使用 openssh 登录

ssh -i C:\Users\Lenovo\.ssh\id_ed25519 [username]@[hostname] -p 22

Sudo 无密码设定

Sudoer修改默认编辑器

sudo visudo

在sudoer文件中添加一行

Defaults editor=/usr/bin/vim, env_editor

Sudoer 设置 账户不使用密码

[USERNAME] ALL=(ALL) NOPASSWD:ALL

注意放置在sudo项之后

之后即可直接执行sudo

Fstab 硬盘挂载

sudo vim /etc/fstab

添加fstab条目

/dev/sdb1 /home/zsig/wdblack exfat user,exec,rw 0 0
sudo mount /dev/sdb1

即可完成挂载

Atlassian Docker 部署

Dockerfile

FROM atlassian/jira-software:9.11.0-jdk11

USER root

# Copy the license agent jar into the container
COPY "atlassian-agent.jar" /opt/atlassian/jira/

# Make the JVM load the agent at startup via JAVA_OPTS
RUN echo '\nexport JAVA_OPTS="-javaagent:/opt/atlassian/jira/atlassian-agent.jar ${JAVA_OPTS}"' >> /opt/atlassian/jira/bin/setenv.sh

如果是confluence,替换jira为confluence即可

docker-compose

version: '3'

networks:
  # Private bridge network shared by Jira, Confluence and MySQL.
  web-network:
    driver: bridge

services:

  jira-core:
    image: jira-software:9.11.0-jdk11
    container_name: jira-core
    hostname: jira-core
    ports:
      - "8081:8080"
    restart: always
    tty: true
    environment:
      JVM_SUPPORT_RECOMMENDED_ARGS: -Djira.downgrade.allowed=true
    volumes:
      - ./jira/data:/var/atlassian/application-data/jira
      - ./jira/mysql-connector-j-8.0.31.jar:/opt/atlassian/jira/atlassian-jira/WEB-INF/lib/mysql-connector-j-8.0.31.jar
    networks:
      - web-network

  confluence:
    image: confluence-server:8.6-jdk17
    container_name: confluence
    hostname: confluence
    ports:
      - "8090:8090"
      - "8091:8091"
    restart: always
    tty: true
    volumes:
      - ./confluence/data:/var/atlassian/application-data/confluence
      - ./confluence/mysql-connector-java-8.0.23.jar:/opt/atlassian/confluence/confluence/WEB-INF/lib/mysql-connector-java-8.0.23.jar
    networks:
      - web-network

  docker-mysql:
    image: mysql:8.0.26
    hostname: mysql
    restart: always
    tty: true
    volumes:
      - ./mysql/my.cnf:/etc/my.cnf
      - ./mysql/data:/var/lib/mysql
    environment:
      MYSQL_ROOT_PASSWORD: YourPassword
      MYSQL_DATABASE: jira
    networks:
      - web-network

后续需要在mysql创建confluence数据库,编码utf8mb4

CREATE DATABASE confluence CHARACTER SET utf8mb4 COLLATE utf8mb4_bin;

Agent生成Key

java -jar atlassian-agent.jar -d -m test@test.com -n BAT -p jira -o http://localhost:7990 -s [SERVER_ID]

如果是confluence,则jira换为conf即可生成

0%