hive 血缘
Last updated on November 20, 2024
🧙 Questions
☄️ Ideas
依赖导入
<dependencies>
<dependency>
<groupId>org.apache.hive</groupId>
<artifactId>hive-exec</artifactId>
<version>3.1.2</version>
</dependency>
</dependencies>
解析sql血缘
package com.example.demo;
import lombok.SneakyThrows;
import org.apache.hadoop.hive.ql.tools.LineageInfo;
import org.springframework.stereotype.Service;

import java.util.ArrayList;
import java.util.Arrays;
import java.util.LinkedHashSet;
import java.util.List;
import java.util.Set;
@Service
public class HiveSqlService {

    /**
     * Parses a Hive SQL statement and prints its table-level lineage:
     * the tables the statement writes to (outputs) and reads from (inputs).
     *
     * <p>Uses Hive's {@code LineageInfo} utility; checked parse exceptions
     * are rethrown unchecked via Lombok's {@code @SneakyThrows}.
     *
     * @param sql the Hive SQL statement to analyze
     */
    @SneakyThrows
    public void parseHiveLineageInfo(String sql) {
        LineageInfo lineageInfo = new LineageInfo();
        lineageInfo.getLineageInfo(sql);

        // Output tables. A LinkedHashSet de-duplicates in O(1) per element while
        // preserving encounter order; the original List + contains() check was O(n^2).
        // Set.toString() prints "[a, b]", identical to Arrays.toString(list.toArray()).
        Set<String> outputTables = new LinkedHashSet<>(lineageInfo.getOutputTableList());
        System.out.println("outputTables:" + outputTables);

        // Input tables, de-duplicated the same way.
        Set<String> inputTables = new LinkedHashSet<>(lineageInfo.getInputTableList());
        System.out.println("inputTables:" + inputTables);
    }
}
解析sql
package com.example.demo;
import lombok.SneakyThrows;
import org.apache.hadoop.hive.ql.lib.*;
import org.apache.hadoop.hive.ql.parse.*;
import org.springframework.stereotype.Service;
import java.util.ArrayList;
import java.util.LinkedHashMap;
import java.util.Map;
import java.util.Stack;
@Service
public class HiveSqlService implements NodeProcessor {

    /**
     * Parses a Hive SQL statement into an AST and walks every node,
     * dispatching each one to {@link #process}. The rule map is left empty,
     * so the default dispatcher routes all nodes to this class.
     *
     * @param sql the Hive SQL statement to parse
     */
    @SneakyThrows
    public void parseHiveSql(String sql) {
        ASTNode tree = ParseUtils.parse(sql, null);
        // Descend past token-less wrapper nodes to the first node that
        // carries an actual token.
        while ((tree.getToken() == null) && (tree.getChildCount() > 0)) {
            tree = (ASTNode) tree.getChild(0);
        }
        // No specific rules registered: every node falls through to the
        // default processor (this object) passed to the dispatcher.
        Map<Rule, NodeProcessor> rules = new LinkedHashMap<>();
        Dispatcher dispatcher = new DefaultRuleDispatcher(this, rules, null);
        GraphWalker ogw = new DefaultGraphWalker(dispatcher);
        ArrayList<Node> topNodes = new ArrayList<>();
        topNodes.add(tree);
        ogw.startWalking(topNodes, null);
    }

    /**
     * Walker callback invoked once per AST node. Recognizes DDL token types
     * (ALTER/CREATE/DROP TABLE, column add/rename) and prints the affected
     * table or column names to stdout; all other tokens are ignored.
     *
     * <p>NOTE(review): the {@code getChild(...)} index patterns below assume
     * a specific Hive AST shape for each token — confirm against the Hive
     * grammar (HiveParser.g) for the hive-exec version in use.
     *
     * @param node             current AST node
     * @param stack            ancestor nodes of {@code node} on the walk path
     * @param nodeProcessorCtx walker context (unused here)
     * @param objects          extra arguments from the dispatcher (unused)
     * @return always {@code null} (no per-node result is accumulated)
     * @throws SemanticException declared by the NodeProcessor contract
     */
    @Override
    public Object process(Node node, Stack<Node> stack, NodeProcessorCtx nodeProcessorCtx, Object... objects) throws SemanticException {
        ASTNode pt = (ASTNode) node;
        switch (pt.getToken().getType()) {
            // Which table an ALTER TABLE statement targets
            case HiveParser.TOK_ALTERTABLE:
                String alterTableName = BaseSemanticAnalyzer.getUnescapedName((ASTNode) pt.getChild(0));
                System.out.println("修改了表:" + alterTableName);
                break;
            // The new table name after an ALTER TABLE ... RENAME
            case HiveParser.TOK_ALTERTABLE_RENAME:
                String afterAlterTableName = BaseSemanticAnalyzer.getUnescapedName((ASTNode) pt.getChild(0).getChild(0));
                System.out.println("修改后的表名:" + afterAlterTableName);
                break;
            // CREATE TABLE: name of the table being created
            case HiveParser.TOK_CREATETABLE:
                String creatTableName = BaseSemanticAnalyzer.getUnescapedName((ASTNode) pt.getChild(0).getChild(0));
                System.out.println("创建新的表名:" + creatTableName);
                break;
            // DROP TABLE: name of the table being dropped
            case HiveParser.TOK_DROPTABLE:
                String deleteTableName = BaseSemanticAnalyzer.getUnescapedName((ASTNode) pt.getChild(0).getChild(0));
                System.out.println("删除表名:" + deleteTableName);
                break;
            // ALTER TABLE ... ADD COLUMNS: new column name and its type
            // NOTE(review): reads children 0 and 2, skipping child 1 — verify
            // this matches the ADDCOLS subtree layout.
            case HiveParser.TOK_ALTERTABLE_ADDCOLS:
                String newCol = BaseSemanticAnalyzer.getUnescapedName((ASTNode) pt.getChild(0));
                String colType = BaseSemanticAnalyzer.getUnescapedName((ASTNode) pt.getChild(2));
                System.out.println("添加字段:" + newCol + "字段类型为:" + colType);
                break;
            // ALTER TABLE ... CHANGE COLUMN: old name, new name, and type
            case HiveParser.TOK_ALTERTABLE_RENAMECOL:
                String prevCol = BaseSemanticAnalyzer.getUnescapedName((ASTNode) pt.getChild(0));
                String nextCol = BaseSemanticAnalyzer.getUnescapedName((ASTNode) pt.getChild(1));
                String type = BaseSemanticAnalyzer.getUnescapedName((ASTNode) pt.getChild(2));
                System.out.println("将" + prevCol + "修改为" + nextCol + "字段类型为" + type);
                break;
        }
        return null;
    }
}
🔗 Links
hive 血缘
https://ispong.isxcode.com/hadoop/hive/hive 血缘/