Edit Aicompanion2_0.java

Made sure Ollama sends back plain text instead of JSON containing generated / used token counts, and uses gpt-oss:20b
This commit is contained in:
Cametendo
2026-02-27 17:23:55 +01:00
parent 3ca8c8baca
commit ef2567e2ee
4 changed files with 27 additions and 20 deletions

2
.env Normal file
View File

@@ -0,0 +1,2 @@
API_BASE_URL="ollama.cametendo.org"
MODEL="gpt-oss:20b"

0
.env.example Normal file
View File

0
gradlew vendored Normal file → Executable file
View File

View File

@@ -12,11 +12,16 @@ import java.io.OutputStream;
import java.net.HttpURLConnection; import java.net.HttpURLConnection;
import java.net.URL; import java.net.URL;
import java.io.FileInputStream;
import java.io.IOException;
import java.nio.file.Path;
import java.util.Properties;
public class Aicompanion2_0 implements ModInitializer { public class Aicompanion2_0 implements ModInitializer {
public static final String MOD_ID = "aicompanion2_0"; public static final String MOD_ID = "aicompanion2_0";
private static String API_BASE_URL = "http://localhost:11434"; private static String API_BASE_URL = "http://ollama.cametendo.org";
private static String API_PATH = "/api/generate"; private static String API_PATH = "/api/generate";
@Override @Override
@@ -73,20 +78,21 @@ public class Aicompanion2_0 implements ModInitializer {
System.out.println("[" + MOD_ID + "] MOD GELADEN!"); System.out.println("[" + MOD_ID + "] MOD GELADEN!");
} }
// Innerhalb deiner Klasse Aicompanion2_0
private String callOllama(String prompt) throws Exception { private String callOllama(String prompt) throws Exception {
// URL deines Ollama-Servers, ggf. anpassen
URL url = new URL(API_BASE_URL + API_PATH); URL url = new URL(API_BASE_URL + API_PATH);
HttpURLConnection conn = (HttpURLConnection) url.openConnection(); HttpURLConnection conn = (HttpURLConnection) url.openConnection();
conn.setRequestMethod("POST"); conn.setRequestMethod("POST");
conn.setRequestProperty("Content-Type", "application/json; charset=utf-8"); conn.setRequestProperty("Content-Type", "application/json; charset=utf-8");
conn.setDoOutput(true); conn.setDoOutput(true);
// WICHTIG: "stream": false hinzugefügt
String json = """ String json = """
{ {
"model": "llama3", "model": "gpt-oss:20b",
"prompt": "%s" "prompt": "%s",
"stream": false
} }
""".formatted(prompt.replace("\"", "\\\"")); """.formatted(prompt.replace("\"", "\\\""));
@@ -95,28 +101,28 @@ public class Aicompanion2_0 implements ModInitializer {
os.write(input, 0, input.length); os.write(input, 0, input.length);
} }
int status = conn.getResponseCode(); if (conn.getResponseCode() != 200) return "Fehler: " + conn.getResponseCode();
if (status != 200) {
System.out.println("[" + MOD_ID + "] Ollama HTTP Status: " + status);
return null;
}
StringBuilder resp = new StringBuilder(); StringBuilder resp = new StringBuilder();
try (BufferedReader br = new BufferedReader( try (BufferedReader br = new BufferedReader(new InputStreamReader(conn.getInputStream(), "utf-8"))) {
new InputStreamReader(conn.getInputStream(), "utf-8"))) {
String line; String line;
while ((line = br.readLine()) != null) { while ((line = br.readLine()) != null) {
resp.append(line); resp.append(line);
} }
} }
conn.disconnect(); conn.disconnect();
// Sehr einfach: komplette JSON-Antwort zurückgeben // Primitives Parsing der Antwort (ohne externe Library)
// Später können wir das noch sauber parsen String fullResponse = resp.toString();
return resp.toString(); if (fullResponse.contains("\"response\":\"")) {
int start = fullResponse.indexOf("\"response\":\"") + 12;
int end = fullResponse.indexOf("\"", start);
return fullResponse.substring(start, end);
} }
}
return fullResponse;
}
private void loadConfig() { private void loadConfig() {
try { try {
Path configPath = Path.of("config", "aicompanion2_0.properties"); Path configPath = Path.of("config", "aicompanion2_0.properties");
@@ -130,5 +136,4 @@ private void loadConfig() {
System.out.println("[" + MOD_ID + "] Keine Config gefunden, benutze Default-API."); System.out.println("[" + MOD_ID + "] Keine Config gefunden, benutze Default-API.");
} }
} }
}