Commit 2d71148

fixed format

1 parent a32eda0 commit 2d71148

File tree

  • frontend/server/src/main/java/org/pytorch/serve/wlm/Model.java

1 file changed: +29 −23 lines changed

frontend/server/src/main/java/org/pytorch/serve/wlm/Model.java (+29 −23)
@@ -190,29 +190,35 @@ public JsonObject getModelState(boolean isDefaultVersion) {
     }
 
     public void setModelState(JsonObject modelInfo) {
-        minWorkers = modelInfo.has(MIN_WORKERS) && !modelInfo.get(MIN_WORKERS).isJsonNull()
-                ? modelInfo.get(MIN_WORKERS).getAsInt()
-                : 1; // default value for minWorkers
-
-        maxWorkers = modelInfo.has(MAX_WORKERS) && !modelInfo.get(MAX_WORKERS).isJsonNull()
-                ? modelInfo.get(MAX_WORKERS).getAsInt()
-                : 5; // default value for maxWorkers
-
-        maxBatchDelay = modelInfo.has(MAX_BATCH_DELAY) && !modelInfo.get(MAX_BATCH_DELAY).isJsonNull()
-                ? modelInfo.get(MAX_BATCH_DELAY).getAsInt()
-                : 100; // default value for maxBatchDelay
-
-        responseTimeout = modelInfo.has(RESPONSE_TIMEOUT) && !modelInfo.get(RESPONSE_TIMEOUT).isJsonNull()
-                ? modelInfo.get(RESPONSE_TIMEOUT).getAsInt()
-                : 120; // default value for responseTimeout
-
-        startupTimeout = modelInfo.has(STARTUP_TIMEOUT) && !modelInfo.get(STARTUP_TIMEOUT).isJsonNull()
-                ? modelInfo.get(STARTUP_TIMEOUT).getAsInt()
-                : 120; // default value for startupTimeout
-
-        batchSize = modelInfo.has(BATCH_SIZE) && !modelInfo.get(BATCH_SIZE).isJsonNull()
-                ? modelInfo.get(BATCH_SIZE).getAsInt()
-                : 1; // default value for batchSize
+        minWorkers =
+                modelInfo.has(MIN_WORKERS) && !modelInfo.get(MIN_WORKERS).isJsonNull()
+                        ? modelInfo.get(MIN_WORKERS).getAsInt()
+                        : 1; // default value for minWorkers
+
+        maxWorkers =
+                modelInfo.has(MAX_WORKERS) && !modelInfo.get(MAX_WORKERS).isJsonNull()
+                        ? modelInfo.get(MAX_WORKERS).getAsInt()
+                        : 5; // default value for maxWorkers
+
+        maxBatchDelay =
+                modelInfo.has(MAX_BATCH_DELAY) && !modelInfo.get(MAX_BATCH_DELAY).isJsonNull()
+                        ? modelInfo.get(MAX_BATCH_DELAY).getAsInt()
+                        : 100; // default value for maxBatchDelay
+
+        responseTimeout =
+                modelInfo.has(RESPONSE_TIMEOUT) && !modelInfo.get(RESPONSE_TIMEOUT).isJsonNull()
+                        ? modelInfo.get(RESPONSE_TIMEOUT).getAsInt()
+                        : 120; // default value for responseTimeout
+
+        startupTimeout =
+                modelInfo.has(STARTUP_TIMEOUT) && !modelInfo.get(STARTUP_TIMEOUT).isJsonNull()
+                        ? modelInfo.get(STARTUP_TIMEOUT).getAsInt()
+                        : 120; // default value for startupTimeout
+
+        batchSize =
+                modelInfo.has(BATCH_SIZE) && !modelInfo.get(BATCH_SIZE).isJsonNull()
+                        ? modelInfo.get(BATCH_SIZE).getAsInt()
+                        : 1; // default value for batchSize
 
         JsonElement runtime = modelInfo.get(RUNTIME_TYPE);
         String runtime_str = Manifest.RuntimeType.PYTHON.getValue();
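The change is formatting only: each field is still read with the same present-and-not-null guard before falling back to its default. For reference, a minimal standalone sketch of that Gson pattern follows; it is not part of the commit, and the helper name getIntOrDefault and the key strings in the sample JSON are illustrative assumptions — only the Gson calls (has, isJsonNull, getAsInt) mirror the ones used in Model.java.

import com.google.gson.JsonObject;
import com.google.gson.JsonParser;

public class ModelStateDefaults {

    // Same guard the diff keeps: the key must be present and must not be a JSON null,
    // otherwise the caller-supplied default is returned.
    static int getIntOrDefault(JsonObject obj, String key, int defaultValue) {
        return obj.has(key) && !obj.get(key).isJsonNull()
                ? obj.get(key).getAsInt()
                : defaultValue;
    }

    public static void main(String[] args) {
        // Hypothetical saved model state; the key names are illustrative only.
        JsonObject modelInfo =
                JsonParser.parseString("{\"minWorkers\": 2, \"maxWorkers\": null}")
                        .getAsJsonObject();

        System.out.println(getIntOrDefault(modelInfo, "minWorkers", 1));      // 2: key present
        System.out.println(getIntOrDefault(modelInfo, "maxWorkers", 5));      // 5: value is JSON null
        System.out.println(getIntOrDefault(modelInfo, "maxBatchDelay", 100)); // 100: key absent
    }
}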
