
Commit f8bd06c

added endpoints for db load
1 parent 11bd82b commit f8bd06c

File tree

2 files changed: +363 additions, -0 deletions


src/main/java/org/springframework/samples/petclinic/clinicactivity/ClinicActivityController.java

Lines changed: 50 additions & 0 deletions
@@ -161,6 +161,56 @@ public ResponseEntity<String> recreateAndPopulateLogs(@RequestParam(name = "coun
         }
     }
 
+    /**
+     * 🔒 LOCK CONTENTION LOAD endpoint - Maximum database lock pressure
+     * Creates lock contention scenarios with concurrent transactions and deadlock situations
+     */
+    @PostMapping("/lock-contention-load")
+    public ResponseEntity<String> createLockContentionLoad(@RequestParam(name = "threads", defaultValue = "50") int threads,
+            @RequestParam(name = "duration", defaultValue = "300") int durationSeconds) {
+        logger.warn("Received request to create LOCK CONTENTION LOAD with {} threads for {} seconds - This will create MASSIVE lock contention!", threads, durationSeconds);
+        if (threads <= 0 || durationSeconds <= 0) {
+            return ResponseEntity.badRequest().body("Threads and duration must be positive integers.");
+        }
+        if (threads > 50) {
+            return ResponseEntity.badRequest().body("Too many threads for lock contention - maximum 50 to prevent system crash.");
+        }
+        if (durationSeconds > 300) {
+            return ResponseEntity.badRequest().body("Duration too long for lock contention - maximum 300 seconds to prevent system lockup.");
+        }
+        try {
+            dataService.createLockContentionLoad(threads, durationSeconds);
+            return ResponseEntity.ok("Successfully completed LOCK CONTENTION LOAD with " + threads + " threads for " + durationSeconds + " seconds - Locks were contended!");
+        } catch (Exception e) {
+            logger.error("Error during lock contention load", e);
+            return ResponseEntity.status(HttpStatus.INTERNAL_SERVER_ERROR).body("Error during lock contention load: " + e.getMessage());
+        }
+    }
+
+    /**
+     * 💾 I/O INTENSIVE LOAD endpoint - Maximum I/O pressure (Read + Write)
+     * Creates massive I/O operations with random access patterns to stress storage subsystem
+     * Uses simple queries with large data transfers to keep I/O busy while minimizing CPU/Memory usage
+     * Focuses on disk I/O bottlenecks that can be improved by faster storage or read replicas
+     */
+    @PostMapping("/io-intensive-load")
+    public ResponseEntity<String> createIOIntensiveLoad(@RequestParam(name = "duration", defaultValue = "5") int durationMinutes) {
+        logger.warn("Received request to create I/O INTENSIVE LOAD for {} minutes - This will MAX OUT disk I/O operations!", durationMinutes);
+        if (durationMinutes <= 0) {
+            return ResponseEntity.badRequest().body("Duration must be a positive integer.");
+        }
+        if (durationMinutes > 60) {
+            return ResponseEntity.badRequest().body("Duration too high for I/O intensive load - maximum 60 minutes to prevent storage overload.");
+        }
+        try {
+            dataService.createIOIntensiveLoad(durationMinutes);
+            return ResponseEntity.ok("Successfully completed I/O INTENSIVE LOAD for " + durationMinutes + " minutes - Disk I/O was maxed out!");
+        } catch (Exception e) {
+            logger.error("Error during I/O intensive load", e);
+            return ResponseEntity.status(HttpStatus.INTERNAL_SERVER_ERROR).body("Error during I/O intensive load: " + e.getMessage());
+        }
+    }
+
     private void performObservableOperation(String operationName) {
         Span span = otelTracer.spanBuilder(operationName)
             .setSpanKind(SpanKind.CLIENT)
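
For manual testing, here is a minimal client sketch that drives both new endpoints with the plain JDK 11+ HTTP client. The controller's base request mapping and the server address are not visible in this diff, so the /clinic-activity prefix and localhost:8080 below are assumptions to adjust; note also that both endpoints invoke the data service synchronously, so each call blocks until the requested load duration has elapsed.

// Sketch only: drives the two new load endpoints from a test client.
// ASSUMPTIONS: the app listens on localhost:8080 and the controller is mapped
// under /clinic-activity -- neither is shown in this diff, so adjust as needed.
import java.net.URI;
import java.net.http.HttpClient;
import java.net.http.HttpRequest;
import java.net.http.HttpResponse;

public class LoadEndpointClient {

    private static final String BASE = "http://localhost:8080/clinic-activity"; // assumed base path

    public static void main(String[] args) throws Exception {
        HttpClient client = HttpClient.newHttpClient();

        // Lock-contention run: 10 threads for 60 seconds (within the endpoint's limits).
        // This call blocks for roughly the full 60 seconds.
        HttpRequest lockLoad = HttpRequest.newBuilder(
                URI.create(BASE + "/lock-contention-load?threads=10&duration=60"))
            .POST(HttpRequest.BodyPublishers.noBody())
            .build();
        HttpResponse<String> lockResponse = client.send(lockLoad, HttpResponse.BodyHandlers.ofString());
        System.out.println(lockResponse.statusCode() + " " + lockResponse.body());

        // I/O-intensive run: 5 minutes (the endpoint caps the duration at 60 minutes).
        HttpRequest ioLoad = HttpRequest.newBuilder(
                URI.create(BASE + "/io-intensive-load?duration=5"))
            .POST(HttpRequest.BodyPublishers.noBody())
            .build();
        HttpResponse<String> ioResponse = client.send(ioLoad, HttpResponse.BodyHandlers.ofString());
        System.out.println(ioResponse.statusCode() + " " + ioResponse.body());
    }
}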

src/main/java/org/springframework/samples/petclinic/clinicactivity/ClinicActivityDataService.java

Lines changed: 313 additions & 0 deletions
@@ -25,7 +25,13 @@
 import java.util.List;
 import java.util.Locale;
 import java.util.concurrent.TimeUnit;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.Future;
+import java.util.concurrent.atomic.AtomicInteger;
 import java.util.Random;
+import java.util.Map;
+import java.util.HashMap;
 
 @Service
 public class ClinicActivityDataService {
@@ -47,6 +53,7 @@ public class ClinicActivityDataService {
         "Emergency Alert", "Consultation Note", "Follow-up Reminder"
     );
     private final Random random = new Random();
+    private final ExecutorService executorService = Executors.newFixedThreadPool(8);
 
     @Autowired
     public ClinicActivityDataService(ClinicActivityLogRepository repository,
@@ -200,4 +207,310 @@ private String csv(String value) {
         String escaped = value.replace("\"", "\"\"").replace("\\", "\\\\");
         return '"' + escaped + '"';
     }
+
+    /**
+     * 5. LOCK CONTENTION LOAD - Maximum database lock pressure and concurrent access
+     * Creates lock contention scenarios with multiple threads competing for same resources
+     */
+    public void createLockContentionLoad(int numberOfThreads, int durationSeconds) {
+        logger.warn("Starting LOCK CONTENTION load test with {} threads for {} seconds - This will create MASSIVE lock contention!",
+            numberOfThreads, durationSeconds);
+        long startTime = System.currentTimeMillis();
+        long endTime = startTime + (durationSeconds * 1000L);
+
+        try {
+            // Create a shared list to track thread results
+            List<String> threadResults = new ArrayList<>();
+            List<Thread> threads = new ArrayList<>();
+
+            // Create multiple competing threads
+            for (int t = 0; t < numberOfThreads; t++) {
+                final int threadId = t;
+                Thread lockContentionThread = new Thread(() -> {
+                    try {
+                        createLockContentionForThread(threadId, endTime, threadResults);
+                    } catch (Exception e) {
+                        logger.error("Error in lock contention thread {}", threadId, e);
+                    }
+                });
+
+                lockContentionThread.setName("LockContentionThread-" + threadId);
+                threads.add(lockContentionThread);
+            }
+
+            // Start all threads simultaneously
+            logger.info("Starting {} lock contention threads...", numberOfThreads);
+            for (Thread thread : threads) {
+                thread.start();
+            }
+
+            // Wait for all threads to complete
+            for (Thread thread : threads) {
+                try {
+                    thread.join();
+                } catch (InterruptedException e) {
+                    Thread.currentThread().interrupt();
+                    logger.warn("Interrupted while waiting for thread: {}", thread.getName());
+                }
+            }
+
+            long actualEndTime = System.currentTimeMillis();
+            logger.warn("Completed LOCK CONTENTION load test in {} ms with {} threads. Results: {}",
+                (actualEndTime - startTime), numberOfThreads, threadResults.size());
+
+        } catch (Exception e) {
+            logger.error("Error during lock contention load test", e);
+            throw new RuntimeException("Error during lock contention load test: " + e.getMessage(), e);
+        }
+    }
+
+    private void createLockContentionForThread(int threadId, long endTime, List<String> threadResults) {
+        Faker faker = new Faker(new Locale("en-US"));
+        int operationCount = 0;
+
+        while (System.currentTimeMillis() < endTime) {
+            DefaultTransactionDefinition def = new DefaultTransactionDefinition();
+            def.setPropagationBehavior(TransactionDefinition.PROPAGATION_REQUIRES_NEW);
+
+            // Vary isolation levels to create different lock behaviors
+            switch (threadId % 4) {
+                case 0:
+                    def.setIsolationLevel(TransactionDefinition.ISOLATION_READ_COMMITTED);
+                    break;
+                case 1:
+                    def.setIsolationLevel(TransactionDefinition.ISOLATION_REPEATABLE_READ);
+                    break;
+                case 2:
+                    def.setIsolationLevel(TransactionDefinition.ISOLATION_SERIALIZABLE);
+                    break;
+                default:
+                    def.setIsolationLevel(TransactionDefinition.ISOLATION_READ_UNCOMMITTED);
+                    break;
+            }
+
+            TransactionStatus status = transactionManager.getTransaction(def);
+
+            try {
+                // Strategy 1: Compete for same high-value records (guaranteed contention)
+                if (operationCount % 5 == 0) {
+                    // All threads fight for the same "high value" records
+                    List<Map<String, Object>> contestedRecords = jdbcTemplate.queryForList(
+                        "SELECT * FROM clinic_activity_logs WHERE numeric_value BETWEEN 90000 AND 100000 " +
+                        "ORDER BY numeric_value DESC LIMIT 10 FOR UPDATE");
+
+                    // Update these contested records
+                    for (Map<String, Object> record : contestedRecords) {
+                        jdbcTemplate.update(
+                            "UPDATE clinic_activity_logs SET payload = ?, numeric_value = ? WHERE id = ?",
+                            "CONTESTED_UPDATE_THREAD_" + threadId + "_OP_" + operationCount + " " + faker.lorem().sentence(20),
+                            faker.number().numberBetween(90000, 100000),
+                            record.get("id"));
+                    }
+                }
+
+                // Strategy 2: Create deadlock scenarios (lock ordering conflicts)
+                else if (operationCount % 5 == 1) {
+                    if (threadId % 2 == 0) {
+                        // Even threads: Lock A then B
+                        jdbcTemplate.queryForList(
+                            "SELECT * FROM clinic_activity_logs WHERE id BETWEEN 1 AND 50 ORDER BY id FOR UPDATE");
+                        Thread.sleep(10); // Small delay to increase deadlock chance
+                        jdbcTemplate.queryForList(
+                            "SELECT * FROM clinic_activity_logs WHERE id BETWEEN 51 AND 100 ORDER BY id FOR UPDATE");
+                    } else {
+                        // Odd threads: Lock B then A (reverse order = deadlock risk)
+                        jdbcTemplate.queryForList(
+                            "SELECT * FROM clinic_activity_logs WHERE id BETWEEN 51 AND 100 ORDER BY id DESC FOR UPDATE");
+                        Thread.sleep(10);
+                        jdbcTemplate.queryForList(
+                            "SELECT * FROM clinic_activity_logs WHERE id BETWEEN 1 AND 50 ORDER BY id DESC FOR UPDATE");
+                    }
+                }
+
+                // Strategy 3: Table-level lock contention
+                else if (operationCount % 5 == 2) {
+                    // Force table scan with update (creates many row locks)
+                    jdbcTemplate.update(
+                        "UPDATE clinic_activity_logs SET payload = payload || ? WHERE activity_type = ? AND LENGTH(payload) < 5000",
+                        " [THREAD_" + threadId + "_SCAN_UPDATE]",
+                        ACTIVITY_TYPES.get(threadId % ACTIVITY_TYPES.size()));
+                }
+
+                // Strategy 4: Bulk operations causing lock escalation
+                else if (operationCount % 5 == 3) {
+                    // Large batch update (may cause lock escalation)
+                    jdbcTemplate.update(
+                        "UPDATE clinic_activity_logs SET numeric_value = numeric_value + ? " +
+                        "WHERE activity_type = ? AND numeric_value BETWEEN ? AND ?",
+                        threadId,
+                        ACTIVITY_TYPES.get(threadId % ACTIVITY_TYPES.size()),
+                        threadId * 10000,
+                        (threadId + 1) * 10000);
+                }
+
+                // Strategy 5: Long-running transaction with many locks
+                else {
+                    // Hold multiple locks for extended period
+                    for (int i = 0; i < 20; i++) {
+                        int targetId = (threadId * 1000 + i) % 10000 + 1;
+                        jdbcTemplate.queryForList(
+                            "SELECT * FROM clinic_activity_logs WHERE id = ? FOR UPDATE", targetId);
+
+                        jdbcTemplate.update(
+                            "UPDATE clinic_activity_logs SET payload = ? WHERE id = ?",
+                            "LONG_RUNNING_THREAD_" + threadId + "_LOCK_" + i + " " + faker.lorem().sentence(10),
+                            targetId);
+
+                        if (i % 5 == 0) {
+                            Thread.sleep(5); // Hold locks longer
+                        }
+                    }
+                }
+
+                transactionManager.commit(status);
+                operationCount++;
+
+                // Add random small delays to vary timing
+                if (operationCount % 10 == 0) {
+                    Thread.sleep(faker.number().numberBetween(1, 10));
+                }
+
+            } catch (Exception e) {
+                if (!status.isCompleted()) {
+                    transactionManager.rollback(status);
+                }
+
+                // Log deadlocks and lock timeouts (these are expected!)
+                if (e.getMessage() != null &&
+                    (e.getMessage().contains("deadlock") ||
+                     e.getMessage().contains("lock") ||
+                     e.getMessage().contains("timeout"))) {
+                    logger.debug("Expected lock contention in thread {}: {}", threadId, e.getMessage());
+                } else {
+                    logger.error("Unexpected error in lock contention thread {}", threadId, e);
+                }
+
+                try {
+                    Thread.sleep(5); // Brief pause after error
+                } catch (InterruptedException ie) {
+                    Thread.currentThread().interrupt();
+                    break;
+                }
+            }
+        }
+
+        synchronized (threadResults) {
+            threadResults.add("Thread-" + threadId + ": " + operationCount + " operations");
+        }
+
+        logger.info("Lock contention thread {} completed {} operations", threadId, operationCount);
+    }
+
+    /**
+     * 6. I/O INTENSIVE LOAD - Maximum disk I/O pressure with minimal CPU/Memory usage
+     * Creates massive I/O operations with random access patterns to stress storage subsystem
+     * Uses simple queries with large data transfers to keep I/O busy while minimizing CPU/Memory usage
+     * Focuses on disk I/O bottlenecks that can be improved by faster storage or read replicas
+     */
+    public void createIOIntensiveLoad(int durationMinutes) {
+        logger.warn("Starting I/O INTENSIVE load test for {} minutes - This will MAX OUT disk I/O operations!", durationMinutes);
+        long startTime = System.currentTimeMillis();
+        long endTime = startTime + (durationMinutes * 60 * 1000L);
+
+        try {
+            AtomicInteger globalOperationCount = new AtomicInteger(0);
+            List<Thread> threads = new ArrayList<>();
+
+            // Use fewer threads than CPU intensive to minimize CPU usage (focus on I/O)
+            int numThreads = 6;
+            logger.info("Creating {} I/O intensive threads (fewer than CPU load to focus on disk I/O)...", numThreads);
+
+            // Create I/O intensive threads
+            for (int t = 0; t < numThreads; t++) {
+                final int threadId = t;
+                Thread ioThread = new Thread(() -> {
+                    try {
+                        executeIOIntensiveThread(threadId, endTime, globalOperationCount);
+                    } catch (Exception e) {
+                        logger.error("Error in I/O intensive thread {}", threadId, e);
+                    }
+                });
+
+                ioThread.setName("IOIntensiveThread-" + threadId);
+                threads.add(ioThread);
+            }
+
+            // Start all threads
+            logger.info("Starting all {} I/O intensive threads...", numThreads);
+            for (Thread thread : threads) {
+                thread.start();
+            }
+
+            // Wait for all threads to complete
+            for (Thread thread : threads) {
+                try {
+                    thread.join();
+                } catch (InterruptedException e) {
+                    Thread.currentThread().interrupt();
+                    logger.warn("Interrupted while waiting for I/O thread: {}", thread.getName());
+                }
+            }
+
+            long actualEndTime = System.currentTimeMillis();
+            logger.warn("Completed I/O INTENSIVE load test in {} ms. Total operations: {}",
+                (actualEndTime - startTime), globalOperationCount.get());
+
+        } catch (Exception e) {
+            logger.error("Error during I/O intensive load test", e);
+            throw new RuntimeException("Error during I/O intensive load test: " + e.getMessage(), e);
+        }
+    }
+
+    private void executeIOIntensiveThread(int threadId, long endTime, AtomicInteger globalOperationCount) {
+        Random random = new Random();
+        Faker faker = new Faker(new Locale("en-US"));
+        int localOperationCount = 0;
+
+        logger.info("I/O Thread {} starting I/O intensive operations...", threadId);
+
+        while (System.currentTimeMillis() < endTime) {
+            try {
+                // LARGE SEQUENTIAL SCAN - Forces full table scan I/O
+                jdbcTemplate.queryForList(
+                    "SET work_mem = '512MB';" +
+                    "SELECT id, activity_type, numeric_value, event_timestamp, payload " +
+                    "FROM clinic_activity_logs " +
+                    "WHERE LENGTH(payload) > 100 " +
+                    "ORDER BY random() " +
+                    "LIMIT 350000");
+
+                localOperationCount++;
+                int currentGlobalCount = globalOperationCount.incrementAndGet();
+
+                // Log progress every 100 operations per thread
+                if (localOperationCount % 100 == 0) {
+                    long remainingTime = (endTime - System.currentTimeMillis()) / 1000;
+                    logger.info("I/O Thread {} completed {} operations (Global: {}). Time remaining: {}s",
+                        threadId, localOperationCount, currentGlobalCount, remainingTime);
+                }
+
+                // No sleep between operations - continuous queries for maximum I/O pressure,
+                // but yield occasionally so the thread does not monopolize a core
+                if (localOperationCount % 50 == 0) {
+                    Thread.yield();
+                }
+
+            } catch (Exception e) {
+                logger.error("Error in I/O operation for thread {}", threadId, e);
+                try {
+                    Thread.sleep(10); // Brief pause on error
+                } catch (InterruptedException ie) {
+                    Thread.currentThread().interrupt();
+                    break;
+                }
+            }
+        }
+
+        logger.info("I/O Thread {} completed {} total I/O operations", threadId, localOperationCount);
+    }
 }
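
The SQL added in this service leans on PostgreSQL idioms (random(), the || concatenation operator, work_mem), so, assuming a PostgreSQL backend, the read-only sketch below is one way to watch the lock waits that createLockContentionLoad is designed to produce. The connection URL and credentials are placeholders, and pg_blocking_pids() requires PostgreSQL 9.6 or newer.

// Monitoring sketch only: lists sessions currently blocked on locks, which is the
// symptom the lock-contention load above is meant to create.
// ASSUMPTIONS: PostgreSQL backend; hypothetical connection details -- adjust to your environment.
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.ResultSet;
import java.sql.Statement;

public class LockWaitMonitor {

    public static void main(String[] args) throws Exception {
        String url = "jdbc:postgresql://localhost:5432/petclinic"; // assumed connection details
        try (Connection conn = DriverManager.getConnection(url, "petclinic", "petclinic");
             Statement stmt = conn.createStatement()) {
            // pg_blocking_pids() reports which backends hold the locks a session is waiting on
            ResultSet rs = stmt.executeQuery(
                "SELECT pid, pg_blocking_pids(pid) AS blocked_by, wait_event_type, state, left(query, 80) AS query " +
                "FROM pg_stat_activity " +
                "WHERE cardinality(pg_blocking_pids(pid)) > 0");
            while (rs.next()) {
                System.out.printf("pid=%d blocked_by=%s wait=%s state=%s query=%s%n",
                    rs.getInt("pid"), rs.getString("blocked_by"), rs.getString("wait_event_type"),
                    rs.getString("state"), rs.getString("query"));
            }
        }
    }
}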
