From 237410ef3c9e0a581679fcc93a267b2e39267a8d Mon Sep 17 00:00:00 2001
From: gmodena
Date: Wed, 5 May 2021 14:29:15 +0200
Subject: [PATCH 1/3] Remove the language filter for wikidata labels.

Bump Spark driver memory to account for the larger result set. The memory
upper bound was found to allow the job to complete on enwiki.

This change is experimental and is meant to enable analysis and
experimentation.
---
 algorithm.ipynb | 7 +++----
 1 file changed, 3 insertions(+), 4 deletions(-)

diff --git a/algorithm.ipynb b/algorithm.ipynb
index 7776294..59df500 100644
--- a/algorithm.ipynb
+++ b/algorithm.ipynb
@@ -77,7 +77,7 @@ "# Under the hood the library uses findspark to initialise\n",
 "# Spark's environment. pyspark imports will be available \n",
 "# after initialisation\n",
- "spark = get_session(type='regular', app_name=\"ImageRec-DEV Training\")\n",
+ "spark = get_session(type='regular', app_name=\"ImageRec-DEV Training\", extra_settings={'spark.driver.memory': '2048'})\n",
 "import pyspark\n",
 "import pyspark.sql"
 ]
 },
@@ -301,7 +301,6 @@
 " LATERAL VIEW explode(labels) t AS label_lang,label_val\n",
 " LATERAL VIEW OUTER explode(claims) c AS claim\n",
 " WHERE typ='item'\n",
- " AND t.label_lang='\"\"\"+label_lang+\"\"\"'\n",
 " AND snapshot='\"\"\"+snapshot+\"\"\"'\n",
 " AND claim.mainSnak.property in ('P18','P31','P373')\n",
 " GROUP BY id,label_val\n",
@@ -597,10 +596,10 @@
 "name": "python",
 "nbconvert_exporter": "python",
 "pygments_lexer": "ipython3",
- "version": "3.7.1"
+ "version": "3.7.3"
 },
 "toc-showtags": true
 },
 "nbformat": 4,
 "nbformat_minor": 4
-}
\ No newline at end of file
+}

From 61d156c831e80fa4d181f48bf6ebdb5f7a4a8fca Mon Sep 17 00:00:00 2001
From: gmodena
Date: Thu, 6 May 2021 23:06:42 +0200
Subject: [PATCH 2/3] Fix: specify the memory unit of measurement

---
 algorithm.ipynb | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/algorithm.ipynb b/algorithm.ipynb
index 59df500..5834e41 100644
--- a/algorithm.ipynb
+++ b/algorithm.ipynb
@@ -77,7 +77,7 @@ "# Under the hood the library uses findspark to initialise\n",
 "# Spark's environment. pyspark imports will be available \n",
 "# after initialisation\n",
- "spark = get_session(type='regular', app_name=\"ImageRec-DEV Training\", extra_settings={'spark.driver.memory': '2048'})\n",
+ "spark = get_session(type='regular', app_name=\"ImageRec-DEV Training\", extra_settings={'spark.driver.memory': '2048M'})\n",
 "import pyspark\n",
 "import pyspark.sql"
 ]
 },

From 894d5c29678a7f4e4ebcae673f6763ce2cadfc8e Mon Sep 17 00:00:00 2001
From: gmodena
Date: Thu, 27 May 2021 10:35:01 +0200
Subject: [PATCH 3/3] WIP: memory tweaks to generate enwiki.

This commit tweaks Spark initialisation, memory limits, and the garbage
collection policy to meet enwiki memory requirements.
---
 algorithm.ipynb | 18 ++++++++++++++++--
 1 file changed, 16 insertions(+), 2 deletions(-)

diff --git a/algorithm.ipynb b/algorithm.ipynb
index 5834e41..0c3cbf5 100644
--- a/algorithm.ipynb
+++ b/algorithm.ipynb
@@ -77,9 +77,23 @@ "# Under the hood the library uses findspark to initialise\n",
 "# Spark's environment. pyspark imports will be available \n",
 "# after initialisation\n",
- "spark = get_session(type='regular', app_name=\"ImageRec-DEV Training\", extra_settings={'spark.driver.memory': '2048M'})\n",
+ "# spark = get_session(type='regular', extra_settings={'spark.driver.maxResultSize': '2048M'})\n",
+ "import os\n",
+ "import findspark\n",
+ "SPARK_HOME = os.environ.get(\"SPARK_HOME\", \"/usr/lib/spark2\")\n",
+ "findspark.init(SPARK_HOME)\n",
+ "findspark._add_to_submit_args('--driver-memory 64G --conf \"spark.driver.extraJavaOptions=-XX:+UseG1GC -XX:+PrintGCDetails -XX:+PrintGCTimeStamps\"')\n",
+ "master = 'yarn'\n",
+ "app_name = 'gmodena-imagematching-driver-highmem'\n",
 "import pyspark\n",
- "import pyspark.sql"
+ "from pyspark.sql import SparkSession\n",
+ "\n",
+ "spark = (\n",
+ " SparkSession.builder\n",
+ " .master(master)\n",
+ " .config('spark.driver.maxResultSize', '2048M')\n",
+ " .appName(app_name)\n",
+ " ).getOrCreate()"
 ]
 },
 {