Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Original file line number Diff line number Diff line change
Expand Up @@ -8,6 +8,7 @@
import com.loopers.domain.product.ProductService;
import com.loopers.domain.user.User;
import com.loopers.domain.user.UserService;
import com.loopers.infrastructure.product.ProductCacheService;
import lombok.RequiredArgsConstructor;
import org.springframework.stereotype.Component;
import org.springframework.transaction.annotation.Transactional;
Expand All @@ -24,6 +25,7 @@ public class LikeFacade {
private final ProductService productService;
private final UserService userService;
private final BrandService brandService;
private final ProductCacheService productCacheService;

@Transactional(readOnly = true)
public List<ProductInfo> getLikedProducts(String loginId, String rawPassword) {
Expand All @@ -45,12 +47,16 @@ public void addLike(String loginId, String rawPassword, Long productId) {
User user = userService.authenticate(loginId, rawPassword);
likeService.addLike(user.getId(), productId);
productService.increaseLikesCount(productId);
productCacheService.delete(productId); // likes_count 변경 → 상세 캐시 무효화
productCacheService.deleteListAll(); // likes_desc 정렬 순서 변경 → 목록 캐시 무효화
}

// Removes the authenticated user's like from the product, decrements the
// denormalized likes_count, and invalidates the affected product caches.
// NOTE(review): cache invalidation runs inside the transaction, before commit;
// a concurrent read can repopulate the cache from pre-commit DB state. Consider
// moving eviction to an afterCommit hook — confirm with the team.
@Transactional
public void removeLike(String loginId, String rawPassword, Long productId) {
// Resolve the user from credentials before touching any like state.
User user = userService.authenticate(loginId, rawPassword);
likeService.removeLike(user.getId(), productId);
productService.decreaseLikesCount(productId);
productCacheService.delete(productId); // likes_count changed -> invalidate detail cache
productCacheService.deleteListAll(); // likes_desc ordering changed -> invalidate list caches
}
}
Original file line number Diff line number Diff line change
Expand Up @@ -21,7 +21,7 @@
import org.springframework.stereotype.Component;
import org.springframework.transaction.annotation.Transactional;

import ObjectOptimisticLockingFailureException;
import org.springframework.orm.ObjectOptimisticLockingFailureException;

import java.util.ArrayList;
import java.util.Comparator;
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -4,6 +4,7 @@
import com.loopers.domain.brand.BrandService;
import com.loopers.domain.product.Product;
import com.loopers.domain.product.ProductService;
import com.loopers.infrastructure.product.ProductCacheService;
import com.loopers.support.error.CoreException;
import com.loopers.support.error.ErrorType;
import lombok.RequiredArgsConstructor;
Expand All @@ -16,6 +17,7 @@

import java.util.List;
import java.util.Map;
import java.util.Optional;
import java.util.stream.Collectors;

@RequiredArgsConstructor
Expand All @@ -24,24 +26,42 @@ public class ProductFacade {

private final ProductService productService;
private final BrandService brandService;
private final ProductCacheService productCacheService;

@Transactional
public ProductInfo createProduct(Long brandId, String name, Integer price, Integer stock,
String description, String imageUrl) {
Brand brand = brandService.getBrand(brandId);
Product product = productService.createProduct(brandId, name, price, stock, description, imageUrl);
productCacheService.deleteListAll();
return ProductInfo.from(product, brand);
Comment on lines 31 to 37
Copy link

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

⚠️ Potential issue | 🟠 Major

상품 변경 캐시 무효화도 커밋 이후로 미뤄야 한다

생성/수정/삭제 모두 같은 트랜잭션 안에서 Redis 키를 먼저 지우고 있다. 이 순서에서는 커밋 전에 다른 요청이 이전 DB 상태를 읽어 캐시를 다시 채우는 경쟁 조건이 생겨, 방금 반영한 상품 정보가 오래된 캐시로 덮일 수 있다. 무효화는 afterCommit 훅이나 커밋 후 이벤트 리스너로 옮기는 편이 안전하다. 추가로 생성/수정/삭제 각각에 대해 “커밋 전 조회가 캐시를 재생성하더라도 커밋 후 최종 캐시는 최신 상태”인지 보는 동시성 통합 테스트를 두는 것이 좋다.

Also applies to: 87-101

🤖 Prompt for AI Agents
Verify each finding against the current code and only fix it if needed.

In
`@apps/commerce-api/src/main/java/com/loopers/application/product/ProductFacade.java`
around lines 31 - 37, The cache invalidation
(productCacheService.deleteListAll()) is happening inside the transactional
method createProduct (and similarly in the update/delete methods around lines
87-101), causing a race where other requests can repopulate stale cache before
the DB commit; move the cache deletion to run after commit by registering a
post-commit callback (e.g., using
TransactionSynchronizationManager.registerSynchronization or an afterCommit
event listener) so that productCacheService.deleteListAll() executes only after
the transaction successfully commits and ensure the same change for the
corresponding update/delete methods.

}

@Transactional(readOnly = true)
public ProductInfo getProductDetail(Long id) {
// Cache-aside read: try Redis first and short-circuit on a hit.
Optional<ProductInfo> cached = productCacheService.get(id);
if (cached.isPresent()) {
return cached.get();
}
// Cache miss: assemble the view from the DB, then repopulate the cache.
Product product = productService.getProduct(id);
Brand brand = brandService.getBrand(product.getBrandId());
ProductInfo info = ProductInfo.from(product, brand);
productCacheService.set(info);
return info;
}

@Transactional(readOnly = true)
public Page<ProductInfo> getProducts(Long brandId, String sort, Pageable pageable) {
// 1. 캐시 조회
Optional<Page<ProductInfo>> cached = productCacheService.getList(
brandId, sort, pageable.getPageNumber(), pageable.getPageSize()
);
if (cached.isPresent()) {
return cached.get();
}

// 2. Cache Miss → DB 조회
Pageable sortedPageable = PageRequest.of(
pageable.getPageNumber(), pageable.getPageSize(), resolveSort(sort)
);
Expand All @@ -51,26 +71,34 @@ public Page<ProductInfo> getProducts(Long brandId, String sort, Pageable pageabl
Map<Long, Brand> brandMap = brandService.getBrandsByIds(brandIds).stream()
.collect(Collectors.toMap(Brand::getId, b -> b));

return products.map(p -> {
Page<ProductInfo> result = products.map(p -> {
Brand brand = brandMap.get(p.getBrandId());
if (brand == null) {
throw new CoreException(ErrorType.NOT_FOUND, "브랜드를 찾을 수 없습니다.");
}
return ProductInfo.from(p, brand);
});

// 3. 캐시 저장
productCacheService.setList(brandId, sort, result);
return result;
}

// Updates a product's mutable fields and returns the refreshed view.
// NOTE(review): cache eviction happens before the transaction commits; a
// concurrent read may repopulate stale data until commit — consider afterCommit.
@Transactional
public ProductInfo updateProduct(Long id, String name, Integer price, Integer stock,
String description, String imageUrl) {
Product product = productService.updateProduct(id, name, price, stock, description, imageUrl);
Brand brand = brandService.getBrand(product.getBrandId());
productCacheService.delete(id); // detail cache is now stale
productCacheService.deleteListAll(); // list content/ordering may have changed
return ProductInfo.from(product, brand);
}

// Deletes a product and evicts its cached detail and all cached list pages.
// NOTE(review): eviction runs pre-commit inside the transaction — a concurrent
// read could re-cache the product before the delete commits; confirm intent.
@Transactional
public void deleteProduct(Long id) {
productService.deleteProduct(id);
productCacheService.delete(id);
productCacheService.deleteListAll();
}

private Sort resolveSort(String sort) {
Expand Down
Original file line number Diff line number Diff line change
@@ -0,0 +1,139 @@
package com.loopers.infrastructure.product;

import com.fasterxml.jackson.databind.ObjectMapper;
import com.loopers.application.product.ProductInfo;
import com.loopers.config.redis.RedisConfig;
import lombok.extern.slf4j.Slf4j;
import org.springframework.beans.factory.annotation.Qualifier;
import org.springframework.data.domain.Page;
import org.springframework.data.domain.PageImpl;
import org.springframework.data.domain.PageRequest;
import org.springframework.data.redis.core.RedisTemplate;
import org.springframework.stereotype.Component;

import org.springframework.data.redis.core.ScanOptions;

import java.time.Duration;
import java.util.ArrayList;
import java.util.List;
import java.util.Optional;
import java.util.Set;

@Slf4j
@Component
public class ProductCacheService {

private static final String KEY_PREFIX = "product:detail:";
private static final Duration TTL = Duration.ofMinutes(10);

private static final String LIST_KEY_PREFIX = "product:list:";
private static final Duration LIST_TTL = Duration.ofMinutes(5);

// 목록 캐시 직렬화용 내부 레코드
record ProductListCache(List<ProductInfo> content, long totalElements, int pageNumber, int pageSize) {}

private final RedisTemplate<String, String> defaultRedisTemplate; // 읽기 (Replica)
private final RedisTemplate<String, String> masterRedisTemplate; // 쓰기 (Master)
private final ObjectMapper objectMapper;

public ProductCacheService(
RedisTemplate<String, String> defaultRedisTemplate,
@Qualifier(RedisConfig.REDIS_TEMPLATE_MASTER) RedisTemplate<String, String> masterRedisTemplate,
ObjectMapper objectMapper
) {
this.defaultRedisTemplate = defaultRedisTemplate;
this.masterRedisTemplate = masterRedisTemplate;
this.objectMapper = objectMapper;
Comment on lines +35 to +46
Copy link

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

⚠️ Potential issue | 🟠 Major

Replica 읽기로 즉시 무효화 보장이 깨진다

관련 설정인 modules/redis/src/main/java/com/loopers/config/redis/RedisConfig.java:19-69를 보면 기본 템플릿은 REPLICA_PREFERRED로 읽고, 이 클래스는 쓰기/삭제만 master로 보낸다. 이 조합은 복제 지연 동안 삭제된 키나 갱신 전 값을 replica가 계속 반환할 수 있어서, 방금 수정한 상품 상세/목록이 다시 오래된 캐시를 서빙하는 문제가 생긴다. 즉시 일관성이 필요한 상품 캐시는 읽기도 master로 통일하거나, 최소한 버전 키/namespace 방식처럼 read-after-write를 보장하는 구조로 바꾸는 편이 안전하다. 추가로 갱신 직후 즉시 조회했을 때 stale value가 반환되지 않는 통합 테스트를 두는 것이 좋다.

Also applies to: 47-57, 80-95

🤖 Prompt for AI Agents
Verify each finding against the current code and only fix it if needed.

In
`@apps/commerce-api/src/main/java/com/loopers/infrastructure/product/ProductCacheService.java`
around lines 32 - 43, ProductCacheService currently reads with
defaultRedisTemplate (configured REPLICA_PREFERRED) while writes use
masterRedisTemplate, which can return stale values after a write; change the
cache read paths in ProductCacheService to use masterRedisTemplate for
operations that require immediate consistency (e.g.,
getProductById/getProductList) or implement a read-after-write pattern using a
versioned namespace/key (increment a version on write and read using that
version) to guarantee fresh reads; update references to
defaultRedisTemplate/masterRedisTemplate in the class and ensure
RedisConfig.REDIS_TEMPLATE_MASTER qualifier is used for reads, and add an
integration test that updates a product then immediately reads it to assert no
stale value is returned.

}

// 캐시 조회 - Redis 장애 시 Optional.empty() 반환하여 DB 폴백
public Optional<ProductInfo> get(Long productId) {
try {
String json = defaultRedisTemplate.opsForValue().get(KEY_PREFIX + productId);
if (json == null) {
return Optional.empty();
}
return Optional.of(objectMapper.readValue(json, ProductInfo.class));
} catch (Exception e) {
log.warn("캐시 조회 실패 - productId: {}, error: {}", productId, e.getMessage());
return Optional.empty();
Comment on lines +57 to +59
Copy link

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

⚠️ Potential issue | 🟠 Major

🧩 Analysis chain

🏁 Script executed:

#!/bin/bash
# message-only 예외 로깅 패턴 점검
rg -n -C1 'log\.warn\(".*error: \{\}", .*e\.getMessage\(\)\)' \
  apps/commerce-api/src/main/java/com/loopers/infrastructure/product/ProductCacheService.java

Repository: Loopers-dev-lab/loop-pack-be-l2-vol3-java

Length of output: 885


🏁 Script executed:

#!/bin/bash
# ProductCacheService의 모든 로그 패턴 확인
cat -n apps/commerce-api/src/main/java/com/loopers/infrastructure/product/ProductCacheService.java | grep -E 'log\.(warn|error|info)' 

# 파일의 전체 라인 수 확인
wc -l apps/commerce-api/src/main/java/com/loopers/infrastructure/product/ProductCacheService.java

Repository: Loopers-dev-lab/loop-pack-be-l2-vol3-java

Length of output: 759


🏁 Script executed:

#!/bin/bash
# ProductCacheService 전체 내용 확인 (구조 파악)
cat -n apps/commerce-api/src/main/java/com/loopers/infrastructure/product/ProductCacheService.java | head -150

Repository: Loopers-dev-lab/loop-pack-be-l2-vol3-java

Length of output: 6466


Redis 캐시 조회/저장 실패 로그에서 예외의 cause를 누락하여 스택트레이스 추적이 불가능하다

e.getMessage()만 로깅하면 근본 원인(직렬화 실패, 연결 타임아웃, 메모리 오류 등)의 스택트레이스가 유실되어 운영 환경에서 장애 재현과 원인 분석이 지연된다. 예외 객체를 직접 전달하여 cause 체인을 보존해야 한다.

이 패턴은 6개 위치에서 반복되므로 모두 수정해야 한다:

  • 라인 58: 개별 상품 조회
  • 라인 69: 개별 상품 저장
  • 라인 78: 개별 상품 삭제
  • 라인 97: 목록 조회
  • 라인 114: 목록 저장
  • 라인 131: 목록 전체 삭제

추가로 역직렬화 실패(JsonMappingException) 및 Redis 연결 실패를 강제로 발생시키는 단위 테스트에서 Optional.empty() 폴백과 스택트레이스 포함된 로그 메시지를 함께 검증해야 한다.

수정 예시
- log.warn("캐시 조회 실패 - productId: {}, error: {}", productId, e.getMessage());
+ log.warn("캐시 조회 실패 - productId: {}", productId, e);

- log.warn("캐시 저장 실패 - productId: {}, error: {}", productInfo.id(), e.getMessage());
+ log.warn("캐시 저장 실패 - productId: {}", productInfo.id(), e);

- log.warn("캐시 삭제 실패 - productId: {}, error: {}", productId, e.getMessage());
+ log.warn("캐시 삭제 실패 - productId: {}", productId, e);

- log.warn("목록 캐시 조회 실패 - brandId: {}, sort: {}, error: {}", brandId, sort, e.getMessage());
+ log.warn("목록 캐시 조회 실패 - brandId: {}, sort: {}", brandId, sort, e);

- log.warn("목록 캐시 저장 실패 - brandId: {}, sort: {}, error: {}", brandId, sort, e.getMessage());
+ log.warn("목록 캐시 저장 실패 - brandId: {}, sort: {}", brandId, sort, e);

- log.warn("목록 캐시 전체 삭제 실패: {}", e.getMessage());
+ log.warn("목록 캐시 전체 삭제 실패", e);
🤖 Prompt for AI Agents
Verify each finding against the current code and only fix it if needed.

In
`@apps/commerce-api/src/main/java/com/loopers/infrastructure/product/ProductCacheService.java`
around lines 57 - 59, Update the catch blocks in ProductCacheService so the
exception object is logged instead of only e.getMessage(): replace log.warn("...
error: {}", ..., e.getMessage()) with a log.warn("... error while accessing
cache for {}: ", identifier, e) (for all six occurrences handling individual
product and list get/save/delete) to preserve the cause/stacktrace; then add
unit tests that simulate JsonMappingException and a Redis connection failure to
assert the methods return Optional.empty() and that the log contains the
exception stacktrace (i.e., verify logging captured the throwable) for the
methods involved (ProductCacheService's product/list get/save/delete handlers).

}
}

// Cache write for a single product detail (TTL 10 minutes).
// Failures are swallowed by design: caching is best-effort and must never
// break the request path. Writes go to the master node.
public void set(ProductInfo productInfo) {
try {
String json = objectMapper.writeValueAsString(productInfo);
masterRedisTemplate.opsForValue().set(KEY_PREFIX + productInfo.id(), json, TTL);
} catch (Exception e) {
// Pass the throwable itself (not e.getMessage()) so the stack trace is preserved.
log.warn("캐시 저장 실패 - productId: {}", productInfo.id(), e);
}
}

// Evicts one product's detail cache entry (best-effort; failures are logged only).
public void delete(Long productId) {
try {
masterRedisTemplate.delete(KEY_PREFIX + productId);
} catch (Exception e) {
// Log the throwable so the root cause (connection, timeout, ...) is traceable.
log.warn("캐시 삭제 실패 - productId: {}", productId, e);
}
}

// List-page cache lookup. Returns Optional.empty() on a miss or on any Redis /
// deserialization failure so callers transparently fall back to the DB.
public Optional<Page<ProductInfo>> getList(Long brandId, String sort, int pageNumber, int pageSize) {
try {
String json = defaultRedisTemplate.opsForValue().get(listKey(brandId, sort, pageNumber, pageSize));
if (json == null) {
return Optional.empty();
}
// Rehydrate the Page from the serialized snapshot (content + paging metadata).
ProductListCache cache = objectMapper.readValue(json, ProductListCache.class);
Page<ProductInfo> page = new PageImpl<>(
cache.content(),
PageRequest.of(cache.pageNumber(), cache.pageSize()),
cache.totalElements()
);
return Optional.of(page);
} catch (Exception e) {
// Log the throwable itself to keep the stack trace for diagnosis.
log.warn("목록 캐시 조회 실패 - brandId: {}, sort: {}", brandId, sort, e);
return Optional.empty();
}
}

// List-page cache write (TTL 5 minutes). Serializes content plus paging
// metadata so getList can rebuild an identical Page. Best-effort: failures
// are logged and swallowed so the request path is never broken.
public void setList(Long brandId, String sort, Page<ProductInfo> page) {
try {
ProductListCache cache = new ProductListCache(
page.getContent(), page.getTotalElements(),
page.getNumber(), page.getSize()
);
String json = objectMapper.writeValueAsString(cache);
masterRedisTemplate.opsForValue().set(
listKey(brandId, sort, page.getNumber(), page.getSize()), json, LIST_TTL
);
} catch (Exception e) {
// Pass the throwable (not just its message) so the stack trace is preserved.
log.warn("목록 캐시 저장 실패 - brandId: {}, sort: {}", brandId, sort, e);
}
}

// 목록 캐시 전체 무효화 (SCAN 기반 - 블로킹 방지)
public void deleteListAll() {
try {
ScanOptions options = ScanOptions.scanOptions()
.match(LIST_KEY_PREFIX + "*")
.count(100)
.build();
List<String> keys = new ArrayList<>();
masterRedisTemplate.scan(options).forEachRemaining(keys::add);
if (!keys.isEmpty()) {
masterRedisTemplate.delete(keys);
}
Comment on lines +121 to +129
Copy link

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

⚠️ Potential issue | 🟠 Major

🧩 Analysis chain

🏁 Script executed:

#!/bin/bash
# scan 사용 지점 및 커서 처리 패턴 점검
rg -n -C2 '\.scan\(' --type=java
rg -n -C2 'Cursor<' --type=java

Repository: Loopers-dev-lab/loop-pack-be-l2-vol3-java

Length of output: 805


🏁 Script executed:

cat -n apps/commerce-api/src/main/java/com/loopers/infrastructure/product/ProductCacheService.java | sed -n '110,135p'

Repository: Loopers-dev-lab/loop-pack-be-l2-vol3-java

Length of output: 1252


🏁 Script executed:

rg -n 'import.*Cursor' --type=java

Repository: Loopers-dev-lab/loop-pack-be-l2-vol3-java

Length of output: 67


🌐 Web query:

Spring Data Redis Cursor AutoCloseable scan forEachRemaining memory

💡 Result:

  • In Spring Data Redis, Cursor/ScanCursor is designed to iterate incrementally: ScanCursor “continuously load[s] additional results from Redis server” as you iterate, rather than materializing the full result set up-front. [1]
• forEachRemaining(...) is just the standard Iterator traversal; it does not inherently store everything in memory — your consumer does (e.g., keys.add(...) collects all keys in RAM). Any memory growth is typically from what you accumulate while iterating, not from forEachRemaining itself. [2]
  • The big operational/memory pitfall is resource/connection retention if you don’t close the cursor: Spring explicitly says to close the cursor when done so implementations can clean up resources they must keep open to iterate (commonly the underlying connection), e.g., via try-with-resources. [2]
  • Also, setting a very large ScanOptions.count(...) can increase the per-roundtrip batch size returned by Redis, which can raise transient client-side memory usage per iteration (even though the scan is still incremental). [1]

Sources:
[1] Spring Data Redis ScanCursor Javadoc (current API) (docs.spring.io)
[2] Spring Data Redis Cursor Javadoc (2.7.8 API) (docs.spring.io)

Citations:


🏁 Script executed:

rg -n '\.scan\(' apps/commerce-api/src/main/java --type=java -A3

Repository: Loopers-dev-lab/loop-pack-be-l2-vol3-java

Length of output: 642


🏁 Script executed:

rg -n 'delete\(' apps/commerce-api/src/main/java/com/loopers/infrastructure/product/ProductCacheService.java --type=java -B2 -A2

Repository: Loopers-dev-lab/loop-pack-be-l2-vol3-java

Length of output: 588


🏁 Script executed:

# deleteListAll 메서드의 호출 지점 확인
rg -n 'deleteListAll' apps/commerce-api/src/main/java --type=java

Repository: Loopers-dev-lab/loop-pack-be-l2-vol3-java

Length of output: 944


🌐 Web query:

Spring Data Redis RedisTemplate scan() Cursor AutoCloseable return type

💡 Result:

RedisTemplate#scan(ScanOptions) returns a Cursor<K> (not AutoCloseable in the method signature). However, Spring Data Redis’ Cursor is meant to be closed when you’re done (to avoid resource leaks), so you should use it with try-with-resources. [1]

In practice, the returned cursor is typically a ScanCursor, which implements Closeable and AutoCloseable (among others). [2]

ScanOptions options = ScanOptions.scanOptions().match("foo:*").count(1000).build();

try (Cursor<String> c = redisTemplate.scan(options)) {
  while (c.hasNext()) {
    String key = c.next();
    // ...
  }
}

Sources: [1] [2]


🏁 Script executed:

# 프로젝트의 Spring Data Redis 버전 확인
find . -name "pom.xml" -o -name "build.gradle" | xargs grep -l "spring-data-redis" | head -1 | xargs cat | grep -A2 -B2 "spring-data-redis"

Repository: Loopers-dev-lab/loop-pack-be-l2-vol3-java

Length of output: 67


SCAN 커서 미종료와 전체 키 메모리 적재로 연결 누수 및 메모리 압박 발생

현재 구현은 다음 두 가지 운영상 문제를 야기한다:

  1. 커서 미종료: masterRedisTemplate.scan(options).forEachRemaining(keys::add)에서 반환된 Cursor를 명시적으로 닫지 않아 Redis 연결이 보유된 상태로 유지된다. Spring Data Redis 공식 문서는 Cursor를 try-with-resources로 감싸 자동 종료하도록 권고한다.
  2. 전체 키 메모리 적재: 모든 키를 keys 리스트에 수집한 후 일괄 삭제하므로, 캐시된 상품이 많을 때 힙 사용량이 급증한다. 특히 deleteListAll()이 상품 작성/수정, 좋아요 변경 등 5개 지점에서 호출되므로 성능 영향이 크다.

배치 단위로 즉시 삭제하고 try-with-resources로 커서를 안전하게 종료하도록 수정하고, 대량 키(예: 10만 개) 시나리오에서 메모리 사용량과 무효화 지연을 측정하는 부하 테스트를 추가해야 한다.

수정 예시
+import org.springframework.data.redis.core.Cursor;
 ...
-            List<String> keys = new ArrayList<>();
-            masterRedisTemplate.scan(options).forEachRemaining(keys::add);
-            if (!keys.isEmpty()) {
-                masterRedisTemplate.delete(keys);
-            }
+            List<String> batch = new ArrayList<>(500);
+            try (Cursor<String> cursor = masterRedisTemplate.scan(options)) {
+                while (cursor.hasNext()) {
+                    batch.add(cursor.next());
+                    if (batch.size() == 500) {
+                        masterRedisTemplate.delete(batch);
+                        batch.clear();
+                    }
+                }
+            }
+            if (!batch.isEmpty()) {
+                masterRedisTemplate.delete(batch);
+            }
🤖 Prompt for AI Agents
Verify each finding against the current code and only fix it if needed.

In
`@apps/commerce-api/src/main/java/com/loopers/infrastructure/product/ProductCacheService.java`
around lines 121 - 129, The scan usage in ProductCacheService (ScanOptions +
masterRedisTemplate.scan(...)) leaks Redis connections because the Cursor isn't
closed and it loads all keys into memory before deleting; change deleteListAll()
to wrap the Cursor returned by masterRedisTemplate.scan(options) in a
try-with-resources to ensure closure, accumulate keys into a small fixed-size
batch (e.g., 1000) and call masterRedisTemplate.delete(batch) repeatedly rather
than collecting all keys into one List, and add a load test that exercises
deleteListAll() with large key counts (e.g., 100k) to measure memory and
latency.

} catch (Exception e) {
log.warn("목록 캐시 전체 삭제 실패: {}", e.getMessage());
}
}

// Builds the Redis key for one cached list page:
// product:list:{brandId|all}:{sort}:{pageNumber}:{pageSize}
private String listKey(Long brandId, String sort, int pageNumber, int pageSize) {
// "all" stands in for a missing brand filter so unfiltered pages share a namespace.
String brandSegment = (brandId != null) ? brandId.toString() : "all";
return String.join(":",
LIST_KEY_PREFIX + brandSegment,
sort,
Integer.toString(pageNumber),
Integer.toString(pageSize));
}
}
29 changes: 29 additions & 0 deletions scripts/add-indexes.sql
Original file line number Diff line number Diff line change
@@ -0,0 +1,29 @@
-- ============================================================
-- products 테이블 인덱스 추가 스크립트
-- 대상 DB : loopers
--
-- [실행 방법]
-- docker exec -i docker-mysql-1 mysql -uapplication -papplication < scripts/add-indexes.sql
--
-- [설계 근거]
-- idx_products_brand_likes : 브랜드 필터 + 좋아요 정렬 커버
-- idx_products_brand_price : 브랜드 필터 + 가격 정렬 커버
-- idx_products_created_at : 전체 최신순 정렬 커버
-- idx_products_brand_created : 브랜드 필터 + 최신순 정렬 커버
-- deleted_at 미포함 이유 : 100% IS NULL → 카디널리티 1 → 인덱스 효과 없음
-- ============================================================

USE loopers;

-- ------------------------------------------------------------
-- 인덱스 추가
-- ------------------------------------------------------------
-- NOTE(review): MySQL 8.0 does not support CREATE INDEX IF NOT EXISTS, so
-- re-running this script fails with "Duplicate key name" once the indexes
-- exist. Guard re-runs by checking information_schema.statistics first.
CREATE INDEX idx_products_brand_likes ON products (brand_id, likes_count);
CREATE INDEX idx_products_brand_price ON products (brand_id, price);
CREATE INDEX idx_products_created_at ON products (created_at);
CREATE INDEX idx_products_brand_created ON products (brand_id, created_at);
Comment on lines +21 to +24
Copy link

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

⚠️ Potential issue | 🟠 Major

🧩 Analysis chain

🏁 Script executed:

#!/bin/bash
# Description: ProductFacade에서 brandId 없이 호출되는 정렬 조합 확인

# brandId 파라미터가 null인 상태로 정렬 옵션이 사용되는 호출 패턴 확인
rg -n -A5 'getProducts' --type=java | head -50

# findAllByDeletedAtIsNull 메서드 정의 및 사용처 확인
ast-grep --pattern 'findAllByDeletedAtIsNull($$$)'

Repository: Loopers-dev-lab/loop-pack-be-l2-vol3-java

Length of output: 5990


🏁 Script executed:

# Find ProductRepositoryImpl and understand the query patterns
fd 'ProductRepositoryImpl' -t f --type=java

# Also find ProductRepository or ProductJpaRepository
fd 'ProductRepository' -t f --type=java | grep -E '(Impl|Jpa)'

Repository: Loopers-dev-lab/loop-pack-be-l2-vol3-java

Length of output: 428


🏁 Script executed:

# Look at ProductFacade to understand sort option mappings
rg -n 'sort' apps/commerce-api/src/main/java/com/loopers/application/product/ProductFacade.java -A 3 -B 3

Repository: Loopers-dev-lab/loop-pack-be-l2-vol3-java

Length of output: 1495


🏁 Script executed:

# Check ProductService to see how getProducts handles different sort options
rg -n 'getProducts|sort|price|likes' apps/commerce-api/src/main/java/com/loopers/domain/product/ -t java -A 2 | head -100

Repository: Loopers-dev-lab/loop-pack-be-l2-vol3-java

Length of output: 6133


🏁 Script executed:

# Find ProductRepositoryImpl and examine the findProducts method
fd 'ProductRepositoryImpl' --type f

# Also check for ProductRepository interfaces
fd 'ProductRepository' --type f | grep -v test

Repository: Loopers-dev-lab/loop-pack-be-l2-vol3-java

Length of output: 354


🏁 Script executed:

# Search for findProducts implementation
rg -n 'findProducts' apps/commerce-api/src/main -A 10 | head -80

Repository: Loopers-dev-lab/loop-pack-be-l2-vol3-java

Length of output: 4057


🏁 Script executed:

# Look for findAllByDeletedAtIsNull usage
rg -n 'findAllByDeletedAtIsNull' apps/commerce-api/src/main -B 2 -A 5

Repository: Loopers-dev-lab/loop-pack-be-l2-vol3-java

Length of output: 5848


🏁 Script executed:

# Check the SQL script for idempotency (IF NOT EXISTS clause)
cat scripts/add-indexes.sql

Repository: Loopers-dev-lab/loop-pack-be-l2-vol3-java

Length of output: 1259


브랜드 필터 없이 가격 또는 좋아요순으로 정렬할 때 인덱스 미보유 및 재실행 시 실패 위험

ProductRepositoryImpl.findProducts()에서 brandId == null일 때 findAllByDeletedAtIsNull(pageable)이 호출되며, 이 경우 복합 인덱스 (brand_id, ...) 는 선택되지 않는다. 현재 인덱스 설계는 다음 쿼리 패턴을 충분히 커버하지 못한다:

  • WHERE deleted_at IS NULL ORDER BY price ASC/DESC → 인덱스 미보유 (idx_products_price 필요)
  • WHERE deleted_at IS NULL ORDER BY likes_count DESC → 인덱스 미보유 (idx_products_likes 필요)

컨트롤러에서 brandId가 선택사항(@RequestParam(required = false))이므로 이 패턴은 실제로 발생하며, 테이블 크기가 증가하면 풀 테이블 스캔으로 인한 성능 저하가 예상된다.

또한 SQL 스크립트의 CREATE INDEX에 재실행 보호가 없어 인덱스가 이미 존재할 경우 재실행 시 오류가 발생한다. 다만 대상 DB인 MySQL 8.0은 MariaDB와 달리 CREATE INDEX IF NOT EXISTS 구문을 지원하지 않으므로, 운영 환경 재적용을 대비하려면 information_schema.statistics 로 인덱스 존재 여부를 확인하는 조건부 로직(프로시저 또는 스크립트 분기)이 필요하다.

권장 인덱스 추가 및 수정안
CREATE INDEX IF NOT EXISTS idx_products_brand_likes   ON products (brand_id, likes_count);
CREATE INDEX IF NOT EXISTS idx_products_brand_price   ON products (brand_id, price);
CREATE INDEX IF NOT EXISTS idx_products_created_at    ON products (created_at);
CREATE INDEX IF NOT EXISTS idx_products_brand_created ON products (brand_id, created_at);
-- 브랜드 필터 없는 정렬 쿼리 커버
CREATE INDEX IF NOT EXISTS idx_products_price         ON products (price);
CREATE INDEX IF NOT EXISTS idx_products_likes         ON products (likes_count);
🤖 Prompt for AI Agents
Verify each finding against the current code and only fix it if needed.

In `@scripts/add-indexes.sql` around lines 21 - 24, The current index set in
scripts/add-indexes.sql doesn't cover queries from
ProductRepositoryImpl.findProducts() when brandId == null (it calls
findAllByDeletedAtIsNull(pageable)), so add single-column indexes for price and
likes_count (e.g., idx_products_price on price and idx_products_likes on
likes_count) to support ORDER BY price and ORDER BY likes_count without brand
filter; also make all CREATE INDEX statements idempotent by using CREATE INDEX
IF NOT EXISTS (or equivalent conditional creation) so re-running the script
won't fail if an index already exists.


-- ------------------------------------------------------------
-- 결과 확인
-- ------------------------------------------------------------
SHOW INDEX FROM products;
85 changes: 85 additions & 0 deletions scripts/seed-data.sql
Original file line number Diff line number Diff line change
@@ -0,0 +1,85 @@
-- ============================================================
-- 상품 목록 조회 성능 개선을 위한 테스트 데이터 적재 스크립트
-- 대상 DB : loopers
--
-- [실행 전 필수 조건]
-- 1. Docker 실행: docker compose -f docker/infra-compose.yml up -d
-- 2. 앱 실행: ./gradlew :apps:commerce-api:bootRun
-- → Hibernate ddl-auto: create 가 brands, products, likes 테이블을 생성함
-- 3. 앱 종료 후 이 스크립트 실행:
-- docker exec -i docker-mysql-1 mysql -uapplication -papplication < scripts/seed-data.sql
--
-- [주의] 앱을 재시작하면 ddl-auto: create 로 인해 테이블이 초기화됩니다.
-- 데이터를 유지하려면 jpa.yml 의 ddl-auto 를 validate 로 변경 후 재시작하세요.
-- ============================================================

USE loopers;

-- ------------------------------------------------------------
-- 1. 브랜드 20개 INSERT
-- ------------------------------------------------------------
INSERT INTO brands (name, description, created_at, updated_at) VALUES
('Nike', 'Just Do It', NOW(), NOW()),
('Adidas', 'Impossible is Nothing', NOW(), NOW()),
('Puma', 'Forever Faster', NOW(), NOW()),
('New Balance', 'Fearlessly Independent', NOW(), NOW()),
('Reebok', 'Be More Human', NOW(), NOW()),
('Under Armour','The Only Way is Through', NOW(), NOW()),
('Converse', 'Shoes Are Boring. Wear Sneakers', NOW(), NOW()),
('Vans', 'Off the Wall', NOW(), NOW()),
('FILA', 'For the love of sport', NOW(), NOW()),
('Asics', 'Sound Mind Sound Body', NOW(), NOW()),
('Saucony', 'Run Your World', NOW(), NOW()),
('Brooks', 'Run Happy', NOW(), NOW()),
('Mizuno', 'For the Love of Sport', NOW(), NOW()),
('Salomon', 'Time to Play', NOW(), NOW()),
('Columbia', 'Tested Tough', NOW(), NOW()),
('Patagonia', 'We are in Business to Save Our Home Planet', NOW(), NOW()),
('North Face', 'Never Stop Exploring', NOW(), NOW()),
('Lululemon', 'Elevate the World from Mediocrity', NOW(), NOW()),
('Champion', 'It Takes a Little More', NOW(), NOW()),
('Lacoste', 'A Little Green Crocodile', NOW(), NOW());

-- ------------------------------------------------------------
-- 2. 상품 100,000개 INSERT (Recursive CTE - 빠른 방식)
-- brand_id : 1~20 랜덤 분포
-- price : 1,000 ~ 500,000 (1,000원 단위)
-- stock : 0 ~ 1,000
-- likes_count: 0 ~ 10,000 (롱테일 분포 - 대부분 낮고 소수만 높음)
-- created_at : 최근 1년 내 랜덤 날짜
-- ------------------------------------------------------------
SET cte_max_recursion_depth = 100000;

INSERT INTO products
(brand_id, name, price, stock, description, image_url,
version, likes_count, created_at, updated_at, deleted_at)
WITH RECURSIVE nums AS (
SELECT 1 AS n
UNION ALL
SELECT n + 1 FROM nums WHERE n < 100000
)
SELECT
FLOOR(1 + RAND() * 20),
CONCAT('상품_', LPAD(n, 6, '0')),
FLOOR(1 + RAND() * 500) * 1000,
FLOOR(RAND() * 1001),
CONCAT('상품 ', n, '번의 상세 설명입니다.'),
CONCAT('https://cdn.example.com/products/', n, '.jpg'),
0,
FLOOR(POW(RAND(), 3) * 10001),
DATE_SUB(NOW(), INTERVAL FLOOR(RAND() * 365) DAY),
DATE_SUB(NOW(), INTERVAL FLOOR(RAND() * 365) DAY),
Comment on lines +70 to +71
Copy link

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

⚠️ Potential issue | 🟡 Minor

updated_atcreated_at보다 과거가 될 수 있다

현재 두 컬럼을 독립적으로 랜덤 생성해서 시간 역전 데이터가 만들어질 수 있다. 이런 샘플은 정렬·감사·변경 이력 관련 검증을 왜곡하므로 created_at을 먼저 계산한 뒤 updated_at >= created_at이 되도록 파생시키는 편이 안전하다. 적재 후에는 updated_at < created_at 건수가 0인지 확인하는 검증 쿼리도 같이 추가하는 것이 좋다.

🤖 Prompt for AI Agents
Verify each finding against the current code and only fix it if needed.

In `@scripts/seed-data.sql` around lines 70 - 71, 생성 스크립트가 created_at과 updated_at을
독립적으로 랜덤 생성해(updated_at이 created_at보다 과거가 될 수 있음) 문제이니, 먼저 created_at을 생성(예:
DATE_SUB(NOW(), INTERVAL FLOOR(RAND()*365) DAY) using created_at)한 다음
updated_at을 created_at보다 같거나 최신이 되도록 파생시키세요 (예: updated_at := created_at +
INTERVAL FLOOR(RAND()*X) DAY 또는 GREATEST(created_at, DATE_SUB(...))). 적용 대상 식은
현재 파일의 DATE_SUB(NOW(), INTERVAL FLOOR(RAND() * 365) DAY) 표현을 사용하는 삽입/VALUES 로직과
관련 있으며, 또한 데이터 적재 후 검증 쿼리(SELECT COUNT(*) FROM ... WHERE updated_at <
created_at)를 추가해 위 조건을 위반하는 레코드가 0인지 확인하도록 하세요.

NULL
FROM nums;

-- ------------------------------------------------------------
-- 3. 결과 확인
-- ------------------------------------------------------------
SELECT
COUNT(*) AS total_products,
COUNT(DISTINCT brand_id) AS brand_count,
MIN(price) AS min_price,
MAX(price) AS max_price,
ROUND(AVG(likes_count), 1) AS avg_likes,
MAX(likes_count) AS max_likes
FROM products;