10.4 Database Architecture Evolution
Interview importance: ⭐⭐⭐⭐⭐
Common interview questions: How do you shard databases and tables? How is read-write splitting implemented? How does a database architecture evolve?
Estimated reading time: 45 minutes
🎯 Database Architecture Evolution Path
From a Single Database to a Distributed Database
/**
 * Database architecture evolution stages
 */
@Component
public class DatabaseArchitectureEvolution {

    /**
     * Definition of the evolution stages
     */
    public enum EvolutionStage {
        SINGLE_DB("Single database", "Standalone MySQL", "< 10K QPS"),
        MASTER_SLAVE("Master-slave replication", "One master, multiple slaves", "< 100K QPS"),
        READ_WRITE_SPLIT("Read-write splitting", "Read-write splitting middleware", "< 500K QPS"),
        HORIZONTAL_SHARDING("Horizontal sharding", "Sharded databases and tables", "< 1M QPS"),
        DISTRIBUTED_DB("Distributed database", "Distributed transactions", "> 1M QPS");

        private final String name;
        private final String description;
        private final String capacity;

        EvolutionStage(String name, String description, String capacity) {
            this.name = name;
            this.description = description;
            this.capacity = capacity;
        }
    }

    /**
     * Architecture evolution decision maker
     */
    @Component
    public static class ArchitectureDecisionMaker {

        /**
         * Decide the next evolution stage from business metrics
         */
        public EvolutionStage decideNextStage(DatabaseMetrics metrics) {
            long qps = metrics.getQps();
            long dataSize = metrics.getDataSizeGB();
            double cpuUsage = metrics.getCpuUsage();
            double memoryUsage = metrics.getMemoryUsage();

            if (qps < 10000 && dataSize < 100 && cpuUsage < 70) {
                return EvolutionStage.SINGLE_DB;
            } else if (qps < 100000 && cpuUsage > 80) {
                return EvolutionStage.MASTER_SLAVE;
            } else if (qps < 500000 && hasReadWriteImbalance(metrics)) {
                return EvolutionStage.READ_WRITE_SPLIT;
            } else if (dataSize > 1000 || qps > 500000) {
                return EvolutionStage.HORIZONTAL_SHARDING;
            } else {
                return EvolutionStage.DISTRIBUTED_DB;
            }
        }

        private boolean hasReadWriteImbalance(DatabaseMetrics metrics) {
            // Treat a read/write ratio above 3:1 as read-heavy
            return metrics.getReadQps() / (double) metrics.getWriteQps() > 3;
        }

        /**
         * Generate an evolution recommendation
         */
        public EvolutionRecommendation generateRecommendation(DatabaseMetrics metrics) {
            EvolutionStage nextStage = decideNextStage(metrics);
            return EvolutionRecommendation.builder()
                    .currentStage(metrics.getCurrentStage())
                    .recommendedStage(nextStage)
                    .reasons(generateReasons(metrics, nextStage))
                    .migrationSteps(generateMigrationSteps(nextStage))
                    .estimatedEffort(estimateEffort(nextStage))
                    .risks(identifyRisks(nextStage))
                    .build();
        }

        // generateReasons, generateMigrationSteps, estimateEffort and identifyRisks are
        // further private helpers of this class, omitted in this excerpt.
    }
}
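The decision maker above relies on two supporting types that the excerpt does not define. Below is a minimal sketch of what they could look like, plus a small usage example; the field names, Lombok annotations, and the EvolutionDemo class are assumptions inferred from the getters used above, not the original author's code.
// Hypothetical supporting types; field names are guesses based on the getters used above.
@Data
@Builder
public class DatabaseMetrics {
    private DatabaseArchitectureEvolution.EvolutionStage currentStage;
    private long qps;           // total queries per second
    private long readQps;       // read share of QPS
    private long writeQps;      // write share of QPS
    private long dataSizeGB;    // total data volume in GB
    private double cpuUsage;    // CPU utilisation, 0-100
    private double memoryUsage; // memory utilisation, 0-100
}

@Data
@Builder
public class EvolutionRecommendation {
    private DatabaseArchitectureEvolution.EvolutionStage currentStage;
    private DatabaseArchitectureEvolution.EvolutionStage recommendedStage;
    private List<String> reasons;
    private List<String> migrationSteps;
    private String estimatedEffort;
    private List<String> risks;
}

// Example: a read-heavy workload at ~200K QPS is steered toward read-write splitting.
class EvolutionDemo {
    public static void main(String[] args) {
        DatabaseMetrics metrics = DatabaseMetrics.builder()
                .currentStage(DatabaseArchitectureEvolution.EvolutionStage.MASTER_SLAVE)
                .qps(200_000).readQps(170_000).writeQps(30_000)
                .dataSizeGB(400).cpuUsage(75).memoryUsage(60)
                .build();
        // read/write ratio 170K/30K ≈ 5.7 > 3 and QPS < 500K -> READ_WRITE_SPLIT
        System.out.println(new DatabaseArchitectureEvolution.ArchitectureDecisionMaker()
                .decideNextStage(metrics));
    }
}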
📖 Read-Write Splitting Architecture
Master-Slave Replication and Read-Write Splitting Implementation
/**
 * Read-write splitting architecture implementation
 */
@Configuration
public class ReadWriteSplitConfiguration {

    /**
     * Master data source configuration
     */
    @Bean
    @Primary
    public DataSource masterDataSource() {
        HikariConfig config = new HikariConfig();
        config.setJdbcUrl("jdbc:mysql://master.db.com:3306/app_db");
        config.setUsername("app_user");
        config.setPassword("app_password");
        config.setMaximumPoolSize(50);
        config.setMinimumIdle(10);
        return new HikariDataSource(config);
    }

    @Bean
    public DataSource slave1DataSource() {
        HikariConfig config = new HikariConfig();
        config.setJdbcUrl("jdbc:mysql://slave1.db.com:3306/app_db");
        config.setUsername("readonly_user");
        config.setPassword("readonly_password");
        config.setMaximumPoolSize(30);
        config.setMinimumIdle(5);
        config.setReadOnly(true);
        return new HikariDataSource(config);
    }

    @Bean
    public DataSource slave2DataSource() {
        HikariConfig config = new HikariConfig();
        config.setJdbcUrl("jdbc:mysql://slave2.db.com:3306/app_db");
        config.setUsername("readonly_user");
        config.setPassword("readonly_password");
        config.setMaximumPoolSize(30);
        config.setMinimumIdle(5);
        config.setReadOnly(true);
        return new HikariDataSource(config);
    }

    /**
     * Dynamic data source routing
     */
    @Bean
    public DataSource routingDataSource() {
        DynamicRoutingDataSource routingDataSource = new DynamicRoutingDataSource();
        Map<Object, Object> dataSourceMap = new HashMap<>();
        dataSourceMap.put("master", masterDataSource());
        dataSourceMap.put("slave1", slave1DataSource());
        dataSourceMap.put("slave2", slave2DataSource());
        routingDataSource.setTargetDataSources(dataSourceMap);
        routingDataSource.setDefaultTargetDataSource(masterDataSource());
        return routingDataSource;
    }

    /**
     * Dynamic routing data source: selects the target by the key bound to the current thread
     */
    public static class DynamicRoutingDataSource extends AbstractRoutingDataSource {
        @Override
        protected Object determineCurrentLookupKey() {
            return DataSourceContextHolder.getDataSourceType();
        }
    }

    /**
     * Data source context holder (per-thread routing key)
     */
    public static class DataSourceContextHolder {
        private static final ThreadLocal<String> CONTEXT_HOLDER = new ThreadLocal<>();

        public static void setDataSourceType(String dataSourceType) {
            CONTEXT_HOLDER.set(dataSourceType);
        }

        public static String getDataSourceType() {
            return CONTEXT_HOLDER.get();
        }

        public static void clearDataSourceType() {
            CONTEXT_HOLDER.remove();
        }
    }
}
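A practical caveat with AbstractRoutingDataSource: inside a @Transactional method, Spring acquires the connection when the transaction starts, which can happen before the read-write splitting aspect (next block) sets the routing key. A common mitigation is to wrap the routing data source in Spring's LazyConnectionDataSourceProxy so the physical connection is only fetched when the first statement runs, and to point the transaction manager at that proxy. The sketch below also wires the JdbcTemplate beans that the replication-lag monitor further down assumes. The bean names here are assumptions, not part of the original article, and since masterDataSource() is marked @Primary above, the persistence layer must be pointed at the proxy explicitly for routing to take effect.
// Sketch only: extra wiring one might add alongside ReadWriteSplitConfiguration.
@Configuration
public class RoutingDataSourceWiring {

    // Defer physical connection acquisition so the routing key set by the aspect
    // inside a @Transactional method is still honoured.
    @Bean
    public DataSource dataSource(@Qualifier("routingDataSource") DataSource routingDataSource) {
        return new LazyConnectionDataSourceProxy(routingDataSource);
    }

    @Bean
    public PlatformTransactionManager transactionManager(@Qualifier("dataSource") DataSource dataSource) {
        return new DataSourceTransactionManager(dataSource);
    }

    // JdbcTemplate beans assumed by the ReplicationLagMonitor later in this section
    @Bean
    public JdbcTemplate masterJdbcTemplate(@Qualifier("masterDataSource") DataSource masterDataSource) {
        return new JdbcTemplate(masterDataSource);
    }

    @Bean
    public JdbcTemplate slaveJdbcTemplate(@Qualifier("slave1DataSource") DataSource slave1DataSource) {
        return new JdbcTemplate(slave1DataSource);
    }
}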
Read-Write Splitting Middleware
/**
 * Read-write splitting middleware implementation
 */
@Component
public class ReadWriteSplitMiddleware {

    /**
     * Read-write splitting annotation
     */
    @Target({ElementType.METHOD, ElementType.TYPE})
    @Retention(RetentionPolicy.RUNTIME)
    public @interface DataSource {
        // Default "master" pins the method to the master; use "auto" to let the
        // aspect decide between master and slaves, or name a slave explicitly.
        String value() default "master";
    }

    /**
     * Read-write splitting aspect
     */
    @Aspect
    @Component
    public static class DataSourceAspect {

        private final List<String> slaveDataSources = Arrays.asList("slave1", "slave2");
        private final AtomicInteger counter = new AtomicInteger(0);

        @Around("@annotation(dataSource)")
        public Object around(ProceedingJoinPoint joinPoint, DataSource dataSource) throws Throwable {
            try {
                String targetDataSource = determineDataSource(dataSource, joinPoint);
                DataSourceContextHolder.setDataSourceType(targetDataSource);
                return joinPoint.proceed();
            } finally {
                DataSourceContextHolder.clearDataSourceType();
            }
        }

        /**
         * Determine the target data source
         */
        private String determineDataSource(DataSource dataSource, ProceedingJoinPoint joinPoint) {
            String specifiedDataSource = dataSource.value();
            // If a data source is specified explicitly, use it directly
            if (!"auto".equals(specifiedDataSource)) {
                return specifiedDataSource;
            }
            // Otherwise infer read vs. write from the method name
            String methodName = joinPoint.getSignature().getName().toLowerCase();
            if (isReadOperation(methodName)) {
                return selectSlaveDataSource();
            } else {
                return "master";
            }
        }

        /**
         * Check whether the method is a read operation
         */
        private boolean isReadOperation(String methodName) {
            return methodName.startsWith("select") ||
                   methodName.startsWith("get") ||
                   methodName.startsWith("find") ||
                   methodName.startsWith("query") ||
                   methodName.startsWith("count") ||
                   methodName.startsWith("exists");
        }

        /**
         * Pick a slave data source (round-robin load balancing)
         */
        private String selectSlaveDataSource() {
            // floorMod keeps the index non-negative even after the counter overflows
            int index = Math.floorMod(counter.getAndIncrement(), slaveDataSources.size());
            return slaveDataSources.get(index);
        }
    }
    /**
     * Replication lag monitor
     */
    @Slf4j
    @Component
    public static class ReplicationLagMonitor {

        // Assumes JdbcTemplate beans named "masterJdbcTemplate" and "slaveJdbcTemplate" are defined
        @Autowired
        private JdbcTemplate masterJdbcTemplate;
        @Autowired
        private JdbcTemplate slaveJdbcTemplate;

        /**
         * Check replication lag
         */
        @Scheduled(fixedRate = 30000) // run every 30 seconds
        public void checkReplicationLag() {
            try {
                // Current binlog position on the master
                Map<String, Object> masterStatus = masterJdbcTemplate.queryForMap("SHOW MASTER STATUS");
                // Replication status on the slave
                Map<String, Object> slaveStatus = slaveJdbcTemplate.queryForMap("SHOW SLAVE STATUS");

                long lagSeconds = calculateLag(masterStatus, slaveStatus);
                if (lagSeconds > 10) { // alert when lag exceeds 10 seconds
                    log.warn("Replication lag detected: {} seconds", lagSeconds);
                    handleReplicationLag(lagSeconds);
                }
            } catch (Exception e) {
                log.error("Failed to check replication lag", e);
            }
        }
        private long calculateLag(Map<String, Object> masterStatus, Map<String, Object> slaveStatus) {
            // Minimal implementation: read Seconds_Behind_Master as reported by the slave;
            // the article's full lag-calculation logic is not included in this excerpt.
            Object secondsBehind = slaveStatus.get("Seconds_Behind_Master");
            return secondsBehind == null ? 0 : ((Number) secondsBehind).longValue();
        }

        private void handleReplicationLag(long lagSeconds) {
            // Possible reactions (elided in the original): route reads back to the master,
            // pull the lagging slave out of rotation, or notify the on-call DBA.
        }
    }
}
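Finally, a hedged usage sketch of the @DataSource annotation: UserService, UserMapper and User are hypothetical names, not from the article. Read methods opt into automatic slave routing with "auto", while the default value "master" keeps writes and read-your-own-write queries on the master.
// Hypothetical service showing how the @DataSource annotation drives routing.
@Service
public class UserService {

    @Autowired
    private UserMapper userMapper; // assumed MyBatis mapper, not defined in the article

    // "auto" + a name starting with "find" -> the aspect round-robins slave1/slave2
    @DataSource("auto")
    public User findUserById(Long id) {
        return userMapper.selectById(id);
    }

    // Explicitly pinned to the master, e.g. when reading right after a write
    @DataSource("master")
    public User findUserAfterUpdate(Long id) {
        return userMapper.selectById(id);
    }

    // Default value "master": writes always go to the master
    @DataSource
    public void createUser(User user) {
        userMapper.insert(user);
    }
}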