LRCGET:基于Tauri的离线音乐库批量歌词自动化管理方案
2026/5/10 19:03:05
方案一:导出为ONNX并用ONNX Runtime加载——这是最直接且性能较好的Java集成方式。
pip install torch transformers FlagEmbedding onnx onnxruntime
加载模型、设置为评估模式并导出onnx
importtorchfromFlagEmbeddingimportBGEM3FlagModel model=BGEM3FlagModel('BAAI/bge-m3',use_fp16=True)model.eval()# 设置为评估模式dummy_input=torch.tensor([[0]*model.config.hidden_size])# 需要根据模型结构调整onnx_path="bge_m3.onnx"torch.onnx.export(model,dummy_input,onnx_path,opset_version=13,input_names=['input_ids','attention_mask'],# 根据实际输入调整output_names=['output'])<dependency><groupId>com.microsoft.onnxruntime</groupId><artifactId>onnxruntime</artifactId><version>1.17.0</version></dependency>Java代码示例:
importai.onnxruntime.*;importjava.nio.file.Paths;importjava.util.*;publicclassBGE_M3_ONNX{privateOrtSessionsession;privateOrtEnvironmentenv;publicBGE_M3_ONNX(StringmodelPath)throwsOrtException{env=OrtEnvironment.getEnvironment();OrtSession.SessionOptionsopts=newOrtSession.SessionOptions();session=env.createSession(modelPath,opts);}publicfloat[]getEmbedding(Stringtext)throwsOrtException{// 文本预处理(需要实现分词器)// 这里简化处理,实际需要将文本转换为input_ids和attention_mask// 准备输入long[]inputIds=tokenize(text);// 需要实现tokenize方法long[]attentionMask=createAttentionMask(inputIds);// 创建输入Tensorlong[]shape={1,inputIds.length};OnnxTensorinputIdsTensor=OnnxTensor.createTensor(env,inputIds,shape);OnnxTensorattentionMaskTensor=OnnxTensor.createTensor(env,attentionMask,shape);// 准备输入MapMap<String,OnnxTensor>inputs=newHashMap<>();inputs.put("input_ids",inputIdsTensor);inputs.put("attention_mask",attentionMaskTensor);// 运行推理OrtSession.Resultresults=session.run(inputs);// 获取输出OnnxTensoroutputTensor=(OnnxTensor)results.get("output");float[]embeddings=(float[])outputTensor.getValue();returnembeddings;}privatelong[]tokenize(Stringtext){// 需要实现BGE-M3的分词逻辑// 可以使用HuggingFace的tokenizer或实现简单的分词returnnewlong[]{101,2345,6789,102};// 示例}privatelong[]createAttentionMask(long[]inputIds){long[]mask=newlong[inputIds.length];Arrays.fill(mask,1L);returnmask;}publicvoidclose()throwsOrtException{if(session!=null){session.close();}}}DJL是亚马逊开发的Java深度学习库,支持PyTorch、TensorFlow等模型。
Maven依赖:
<dependency>
    <groupId>ai.djl</groupId>
    <artifactId>api</artifactId>
    <version>0.27.0</version>
</dependency>
<dependency>
    <groupId>ai.djl.pytorch</groupId>
    <artifactId>pytorch-engine</artifactId>
    <version>0.27.0</version>
</dependency>

Java代码示例:
importai.djl.Model;importai.djl.inference.Predictor;importai.djl.modality.Input;importai.djl.modality.Output;importai.djl.translate.TranslateException;importai.djl.translate.Translator;importjava.nio.file.Paths;publicclassBGE_M3_DJL{privateModelmodel;privatePredictor<String,float[]>predictor;publicvoidloadModel(StringmodelPath)throwsException{model=Model.newInstance("bge-m3");model.load(Paths.get(modelPath));// 创建Translator(需要自定义实现)Translator<String,float[]>translator=newBGETranslator();predictor=model.newPredictor(translator);}publicfloat[]getEmbedding(Stringtext)throwsTranslateException{returnpredictor.predict(text);}publicvoidclose(){if(predictor!=null){predictor.close();}if(model!=null){model.close();}}// 自定义TranslatorstaticclassBGETranslatorimplementsTranslator<String,float[]>{@Overridepublicfloat[]processOutput(ai.djl.ndarray.NDListlist){// 处理模型输出returnlist.get(0).toFloatArray();}@Overridepublicai.djl.ndarray.NDListprocessInput(TranslatorContextctx,Stringinput){// 文本预处理和tokenization// 需要实现分词逻辑returnnewai.djl.ndarray.NDList();}}}如果模型部署在Python服务中,可以通过HTTP调用。
Python服务端(FastAPI):
fromfastapiimportFastAPIfrompydanticimportBaseModelfromFlagEmbeddingimportBGEM3FlagModelimportnumpyasnp app=FastAPI()model=BGEM3FlagModel('BAAI/bge-m3',use_fp16=True)classEmbeddingRequest(BaseModel):texts:list[str]@app.post("/embed")asyncdefget_embeddings(request:EmbeddingRequest):embeddings=model.encode(request.texts)['dense_vecs']return{"embeddings":embeddings.tolist()}Java客户端:
importcom.fasterxml.jackson.databind.ObjectMapper;importokhttp3.*;publicclassBGE_M3_API_Client{privatestaticfinalStringAPI_URL="http://localhost:8000/embed";privatefinalOkHttpClientclient=newOkHttpClient();privatefinalObjectMappermapper=newObjectMapper();publicfloat[][]getEmbeddings(List<String>texts)throwsException{// 构建请求体Map<String,Object>requestBody=newHashMap<>();requestBody.put("texts",texts);Stringjson=mapper.writeValueAsString(requestBody);RequestBodybody=RequestBody.create(json,MediaType.parse("application/json"));Requestrequest=newRequest.Builder().url(API_URL).post(body).build();try(Responseresponse=client.newCall(request).execute()){if(!response.isSuccessful()){thrownewRuntimeException("请求失败: "+response.code());}StringresponseBody=response.body().string();Map<String,Object>result=mapper.readValue(responseBody,Map.class);// 解析返回的向量List<List<Double>>embeddingsList=(List<List<Double>>)result.get("embeddings");float[][]embeddings=newfloat[embeddingsList.size()][];for(inti=0;i<embeddingsList.size();i++){List<Double>vec=embeddingsList.get(i);embeddings[i]=newfloat[vec.size()];for(intj=0;j<vec.size();j++){embeddings[i][j]=vec.get(j).floatValue();}}returnembeddings;}}}根据搜索结果,DeepSeek4j提供了BGE-M3的Java支持。
代码示例:
importcom.deepseek4j.embedding.EmbeddingClient;importcom.deepseek4j.embedding.EmbeddingRequest;importcom.deepseek4j.embedding.EmbeddingResponse;publicclassBGE_M3_DeepSeek4j{publicstaticvoidmain(String[]args){EmbeddingClientclient=newEmbeddingClient();EmbeddingRequestrequest=EmbeddingRequest.builder().model("bge-m3:latest").input("What is BGE M3?").build();try{EmbeddingResponseresponse=client.embed(request);float[]embedding=response.getEmbedding();System.out.println("向量维度: "+embedding.length);System.out.println("向量: "+Arrays.toString(embedding));}catch(Exceptione){e.printStackTrace();}}}