问题背景

某天跑 Spark SQL 任务的时候,遇到如下报错:
org.apache.spark.SparkException: Job aborted.
	at org.apache.spark.sql.execution.datasources.FileFormatWriter$.write(FileFormatWriter.scala:198)
	at org.apache.spark.sql.hive.execution.SaveAsHiveFile$class.saveAsHiveFile(SaveAsHiveFile.scala:86)
	at org.apache.spark.sql.hive.execution.InsertIntoHiveTable.saveAsHiveFile(InsertIntoHiveTable.scala:66)
	at org.apache.spark.sql.hive.execution.InsertIntoHiveTable.processInsert(InsertIntoHiveTable.scala:195)
	at org.apache.spark.sql.hive.execution.InsertIntoHiveTable.run(InsertIntoHiveTable.scala:99)
	at org.apache.spark.sql.execution.command.DataWritingCommandExec.sideEffectResult$lzycompute(commands.scala:104)
	at org.apache.spark.sql.execution.command.DataWritingCommandExec.sideEffectResult(commands.scala:102)
	at org.apache.spark.sql.execution.command.DataWritingCommandExec.executeCollect(commands.scala:115)
	at org.apache.spark.sql.Dataset$$anonfun$6.apply(Dataset.scala:194)
	at org.apache.spark.sql.Dataset$$anonfun$6.apply(Dataset.scala:194)
	at org.apache.spark.sql.Dataset$$anonfun$52.apply(Dataset.scala:3370)
	at org.apache.spark.sql.execution.SQLExecution$$anonfun$withNewExecutionId$1.apply(SQLExecution.scala:80)
	at org.apache.spark.sql.execution.SQLExecution$.withSQLConfPropagated(SQLExecution.scala:127)
	at org.apache.spark.sql.execution.SQLExecution$.withNewExecutionId(SQLExecution.scala:75)
	at org.apache.spark.sql.Dataset.org$apache$spark$sql$Dataset$$withAction(Dataset.scala:3369)
	at org.apache.spark.sql.Dataset.<init>(Dataset.scala:194)
	at org.apache.spark.sql.Dataset$.ofRows(Dataset.scala:79)
	at org.apache.spark.sql.SparkSession.sql(SparkSession.scala:643)
	at org.apache.spark.sql.SQLContext.sql(SQLContext.scala:694)
	at org.apache.spark.sql.hive.thriftserver.SparkSQLDriver.run(SparkSQLDriver.scala:62)
	at org.apache.spark.sql.hive.thriftserver.SparkSQLCLIDriver.processCmd(SparkSQLCLIDriver.scala:371)
	at org.apache.hadoop.hive.cli.CliDriver.processLine(CliDriver.java:376)
	at org.apache.spark.sql.hive.thriftserver.SparkSQLCLIDriver$.main(SparkSQLCLIDriver.scala:274)
	at org.apache.spark.sql.hive.thriftserver.SparkSQLCLIDriver.main(SparkSQLCLIDriver.scala)
	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
	at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)
	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
	at java.lang.reflect.Method.invoke(Method.java:498)
	at org.apache.spark.deploy.JavaMainApplication.start(SparkApplication.scala:52)
	at org.apache.spark.deploy.SparkSubmit.org$apache$spark$deploy$SparkSubmit$$runMain(SparkSubmit.scala:845)
	at org.apache.spark.deploy.SparkSubmit.doRunMain$1(SparkSubmit.scala:161)
	at org.apache.spark.deploy.SparkSubmit.submit(SparkSubmit.scala:184)
	at org.apache.spark.deploy.SparkSubmit.doSubmit(SparkSubmit.scala:86)
	at org.apache.spark.deploy.SparkSubmit$$anon$2.doSubmit(SparkSubmit.scala:920)
	at org.apache.spark.deploy.SparkSubmit$.main(SparkSubmit.scala:929)
	at org.apache.spark.deploy.SparkSubmit.main(SparkSubmit.scala)
Caused by: org.apache.spark.SparkException: Job aborted due to stage failure: Task 6 in stage 16.0 failed 4 times, most recent failure: Lost task 6.3 in stage 16.0 (TID 478, idc-sql-dms-13, executor 40): ExecutorLostFailure (executor 40 exited caused by one of the running tasks) Reason: Container killed by YARN for exceeding memory limits. 11.8 GB of 11 GB physical memory used. Consider boosting spark.yarn.executor.memoryOverhead or disabling yarn.nodemanager.vmem-check-enabled because of YARN-4714.
Driver stacktrace:
	at org.apache.spark.scheduler.DAGScheduler.org$apache$spark$scheduler$DAGScheduler$$failJobAndIndependentStages(DAGScheduler.scala:1925)
	at org.apache.spark.scheduler.DAGScheduler$$anonfun$abortStage$1.apply(DAGScheduler.scala:1913)
	at org.apache.spark.scheduler.DAGScheduler$$anonfun$abortStage$1.apply(DAGScheduler.scala:1912)
	at scala.collection.mutable.ResizableArray$class.foreach(ResizableArray.scala:59)
	at scala.collection.mutable.ArrayBuffer.foreach(ArrayBuffer.scala:48)
	at org.apache.spark.scheduler.DAGScheduler.abortStage(DAGScheduler.scala:1912)
	at org.apache.spark.scheduler.DAGScheduler$$anonfun$handleTaskSetFailed$1.apply(DAGScheduler.scala:948)
	at scala.Option.foreach(Option.scala:257)
	at org.apache.spark.scheduler.DAGScheduler.handleTaskSetFailed(DAGScheduler.scala:948)
	at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.doOnReceive(DAGScheduler.scala:2146)
	at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.onReceive(DAGScheduler.scala:2095)
	at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.onReceive(DAGScheduler.scala:2084)
	at org.apache.spark.util.EventLoop$$anon$1.run(EventLoop.scala:49)
	at org.apache.spark.scheduler.DAGScheduler.runJob(DAGScheduler.scala:759)
	at org.apache.spark.SparkContext.runJob(SparkContext.scala:2061)
	at org.apache.spark.sql.execution.datasources.FileFormatWriter$.write(FileFormatWriter.scala:167)
	... 35 more
org.apache.spark.SparkException: Job aborted.
	at org.apache.spark.sql.execution.datasources.FileFormatWriter$.write(FileFormatWriter.scala:198)
	at org.apache.spark.sql.hive.execution.SaveAsHiveFile$class.saveAsHiveFile(SaveAsHiveFile.scala:86)
	at org.apache.spark.sql.hive.execution.InsertIntoHiveTable.saveAsHiveFile(InsertIntoHiveTable.scala:66)
	at org.apache.spark.sql.hive.execution.InsertIntoHiveTable.processInsert(InsertIntoHiveTable.scala:195)
	at org.apache.spark.sql.hive.execution.InsertIntoHiveTable.run(InsertIntoHiveTable.scala:99)
	at org.apache.spark.sql.execution.command.DataWritingCommandExec.sideEffectResult$lzycompute(commands.scala:104)
	at org.apache.spark.sql.execution.command.DataWritingCommandExec.sideEffectResult(commands.scala:102)
	at org.apache.spark.sql.execution.command.DataWritingCommandExec.executeCollect(commands.scala:115)
	at org.apache.spark.sql.Dataset$$anonfun$6.apply(Dataset.scala:194)
	at org.apache.spark.sql.Dataset$$anonfun$6.apply(Dataset.scala:194)
	at org.apache.spark.sql.Dataset$$anonfun$52.apply(Dataset.scala:3370)
	at org.apache.spark.sql.execution.SQLExecution$$anonfun$withNewExecutionId$1.apply(SQLExecution.scala:80)
	at org.apache.spark.sql.execution.SQLExecution$.withSQLConfPropagated(SQLExecution.scala:127)
	at org.apache.spark.sql.execution.SQLExecution$.withNewExecutionId(SQLExecution.scala:75)
	at org.apache.spark.sql.Dataset.org$apache$spark$sql$Dataset$$withAction(Dataset.scala:3369)
	at org.apache.spark.sql.Dataset.<init>(Dataset.scala:194)
	at org.apache.spark.sql.Dataset$.ofRows(Dataset.scala:79)
	at org.apache.spark.sql.SparkSession.sql(SparkSession.scala:643)
	at org.apache.spark.sql.SQLContext.sql(SQLContext.scala:694)
	at org.apache.spark.sql.hive.thriftserver.SparkSQLDriver.run(SparkSQLDriver.scala:62)
	at org.apache.spark.sql.hive.thriftserver.SparkSQLCLIDriver.processCmd(SparkSQLCLIDriver.scala:371)
	at org.apache.hadoop.hive.cli.CliDriver.processLine(CliDriver.java:376)
	at org.apache.spark.sql.hive.thriftserver.SparkSQLCLIDriver$.main(SparkSQLCLIDriver.scala:274)
	at org.apache.spark.sql.hive.thriftserver.SparkSQLCLIDriver.main(SparkSQLCLIDriver.scala)
	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
	at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)
	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
	at java.lang.reflect.Method.invoke(Method.java:498)
	at org.apache.spark.deploy.JavaMainApplication.start(SparkApplication.scala:52)
	at org.apache.spark.deploy.SparkSubmit.org$apache$spark$deploy$SparkSubmit$$runMain(SparkSubmit.scala:845)
	at org.apache.spark.deploy.SparkSubmit.doRunMain$1(SparkSubmit.scala:161)
	at org.apache.spark.deploy.SparkSubmit.submit(SparkSubmit.scala:184)
	at org.apache.spark.deploy.SparkSubmit.doSubmit(SparkSubmit.scala:86)
	at org.apache.spark.deploy.SparkSubmit$$anon$2.doSubmit(SparkSubmit.scala:920)
	at org.apache.spark.deploy.SparkSubmit$.main(SparkSubmit.scala:929)
	at org.apache.spark.deploy.SparkSubmit.main(SparkSubmit.scala)
Caused by: org.apache.spark.SparkException: Job aborted due to stage failure: Task 6 in stage 16.0 failed 4 times, most recent failure: Lost task 6.3 in stage 16.0 (TID 478, idc-sql-dms-13, executor 40): ExecutorLostFailure (executor 40 exited caused by one of the running tasks) Reason: Container killed by YARN for exceeding memory limits. 11.8 GB of 11 GB physical memory used. Consider boosting spark.yarn.executor.memoryOverhead or disabling yarn.nodemanager.vmem-check-enabled because of YARN-4714.
Driver stacktrace:
	at org.apache.spark.scheduler.DAGScheduler.org$apache$spark$scheduler$DAGScheduler$$failJobAndIndependentStages(DAGScheduler.scala:1925)
	at org.apache.spark.scheduler.DAGScheduler$$anonfun$abortStage$1.apply(DAGScheduler.scala:1913)
	at org.apache.spark.scheduler.DAGScheduler$$anonfun$abortStage$1.apply(DAGScheduler.scala:1912)
	at scala.collection.mutable.ResizableArray$class.foreach(ResizableArray.scala:59)
	at scala.collection.mutable.ArrayBuffer.foreach(ArrayBuffer.scala:48)
	at org.apache.spark.scheduler.DAGScheduler.abortStage(DAGScheduler.scala:1912)
	at org.apache.spark.scheduler.DAGScheduler$$anonfun$handleTaskSetFailed$1.apply(DAGScheduler.scala:948)
	at scala.Option.foreach(Option.scala:257)
	at org.apache.spark.scheduler.DAGScheduler.handleTaskSetFailed(DAGScheduler.scala:948)
	at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.doOnReceive(DAGScheduler.scala:2146)
	at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.onReceive(DAGScheduler.scala:2095)
	at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.onReceive(DAGScheduler.scala:2084)
	at org.apache.spark.util.EventLoop$$anon$1.run(EventLoop.scala:49)
	at org.apache.spark.scheduler.DAGScheduler.runJob(DAGScheduler.scala:759)
	at org.apache.spark.SparkContext.runJob(SparkContext.scala:2061)
	at org.apache.spark.sql.execution.datasources.FileFormatWriter$.write(FileFormatWriter.scala:167)
	... 35 more

通过上面的日志,大概了解到任务失败的原因应该是内存超过限定。 “Container killed by YARN for exceeding memory limits. ”,解决问题的第一思路是 sql 能不能优化下,加内存属于下下策。

解决办法

先讲一下原来的 sql 思路:

-- 原始写法:子查询先把 bb 表按 id 聚合,用 COLLECT_LIST 把所有 alias
-- 收进一个数组,再用 concat_ws 拼成逗号分隔的字符串。
-- 注:原文的 LEFT JOIN 缺少 ON 条件(会退化成笛卡尔积,在 Spark 里直接报错),
-- 这里按 bb 的聚合键补上 ON a.id = b.id —— 实际关联键请以真实表结构为准。
SELECT
    a.name,
    a.age,
    b.alias
FROM a
LEFT JOIN (
    SELECT
        id,
        concat_ws(',', COLLECT_LIST(alias)) AS alias
    FROM bb
    GROUP BY id
) b
    ON a.id = b.id;

这是一个很简单的逻辑。猜测问题应该出在 concat_ws(',', COLLECT_LIST(alias)) 这一步:当 b 表(即子查询)按 id 聚合时,如果某些 id 对应大量重复数据,全部加载进 COLLECT_LIST 的数组里,就会把 executor 的内存耗尽。

解决思路应该是先把重复数据去掉,再调用 concat_ws(',', COLLECT_LIST(alias))。优化后的 SQL 如下:

-- 优化一:先按 (id, alias) 去重,显著减小 COLLECT_LIST 需要装载的数据量。
-- 修正点(相对原文):
--   1. collect_ws 是笔误,正确的函数名是 concat_ws;
--   2. FROM 里的派生表必须有别名(Spark SQL 否则报错),这里命名为 dedup;
--   3. LEFT JOIN 补上 ON 条件(按 bb 的聚合键假定为 a.id = b.id,请按实际表结构确认)。
SELECT
    a.name,
    a.age,
    b.alias
FROM a
LEFT JOIN (
    SELECT
        id,
        concat_ws(',', COLLECT_LIST(alias)) AS alias
    FROM (
        -- 去重:同一个 (id, alias) 只保留一条
        SELECT
            id,
            alias
        FROM bb
        GROUP BY id, alias
    ) dedup
    GROUP BY id
) b
    ON a.id = b.id;

还有更简单的一种写法,就是使用 COLLECT_SET 代替 COLLECT_LIST:

-- 优化二(更简洁):直接用 COLLECT_SET 代替 COLLECT_LIST,
-- 聚合时即去重,省掉一层子查询。
-- 修正点(相对原文):collect_ws → concat_ws;LEFT JOIN 补上 ON 条件
-- (按 bb 的聚合键假定为 a.id = b.id,请按实际表结构确认)。
-- 注意:COLLECT_SET 去重且不保证元素顺序,如需保序/保留重复,用优化一的写法。
SELECT
    a.name,
    a.age,
    b.alias
FROM a
LEFT JOIN (
    SELECT
        id,
        concat_ws(',', COLLECT_SET(alias)) AS alias
    FROM bb
    GROUP BY id
) b
    ON a.id = b.id;

哈哈,问题解决!

Logo

有“AI”的1024 = 2048,欢迎大家加入2048 AI社区

更多推荐