From a4a6a9c7bd6d241b600063cdaed9f686a085c3c8 Mon Sep 17 00:00:00 2001 From: <> Date: Tue, 11 Jul 2023 13:34:25 +0000 Subject: [PATCH] Deployed 0d976ba with MkDocs version: 1.4.2 --- .../study_locus/_study_locus/index.html | 2 +- .../dataset/summary_statistics/index.html | 527 +++++------------- objects.inv | Bin 1485 -> 1465 bytes search/search_index.json | 2 +- sitemap.xml.gz | Bin 219 -> 219 bytes 5 files changed, 154 insertions(+), 377 deletions(-) diff --git a/components/dataset/study_locus/_study_locus/index.html b/components/dataset/study_locus/_study_locus/index.html index 80e257249..0b6d60f2a 100644 --- a/components/dataset/study_locus/_study_locus/index.html +++ b/components/dataset/study_locus/_study_locus/index.html @@ -1073,7 +1073,7 @@ |-- betaConfidenceIntervalUpper: double (nullable = true) |-- pValueMantissa: float (nullable = true) |-- pValueExponent: integer (nullable = true) - |-- effectAlleleFrequencyFromSource: double (nullable = true) + |-- effectAlleleFrequencyFromSource: float (nullable = true) |-- standardError: double (nullable = true) |-- subStudyDescription: string (nullable = true) |-- qualityControls: array (nullable = true) diff --git a/components/dataset/summary_statistics/index.html b/components/dataset/summary_statistics/index.html index 17455ee9e..6e6429eef 100644 --- a/components/dataset/summary_statistics/index.html +++ b/components/dataset/summary_statistics/index.html @@ -1,7 +1,4 @@ - Summary statistics - Genetics Portal Pipeline

Summary statistics

Bases: Dataset

Summary Statistics dataset.

A summary statistics dataset contains all single point statistics resulting from a GWAS.

Source code in src/otg/dataset/summary_statistics.py
+ Summary statistics - Genetics Portal Pipeline       

@@ -121,100 +118,7 @@
@dataclass
 class SummaryStatistics(Dataset):
     """Summary Statistics dataset.
 
@@ -223,85 +127,6 @@
 
     _schema: t.StructType = parse_spark_schema("summary_statistics.json")
 
-    @staticmethod
-    def _convert_odds_ratio_to_beta(
-        beta: Column, odds_ratio: Column, standard_error: Column
-    ) -> tuple:
-        """Harmonizes effect and standard error to beta.
-
-        Args:
-            beta (Column): Effect in beta
-            odds_ratio (Column): Effect in odds ratio
-            standard_error (Column): Standard error of the effect
-
-        Returns:
-            tuple: beta, standard error
-
-        Examples:
-            >>> df = spark.createDataFrame([{"beta": 0.1, "oddsRatio": 1.1, "standardError": 0.1}, {"beta": None, "oddsRatio": 1.1, "standardError": 0.1}, {"beta": 0.1, "oddsRatio": None, "standardError": 0.1}, {"beta": 0.1, "oddsRatio": 1.1, "standardError": None}])
-            >>> df.select("*", *SummaryStatistics._convert_odds_ratio_to_beta(f.col("beta"), f.col("oddsRatio"), f.col("standardError"))).show()
-            +----+---------+-------------+-------------------+-------------+
-            |beta|oddsRatio|standardError|               beta|standardError|
-            +----+---------+-------------+-------------------+-------------+
-            | 0.1|      1.1|          0.1|                0.1|          0.1|
-            |null|      1.1|          0.1|0.09531017980432493|         null|
-            | 0.1|     null|          0.1|                0.1|          0.1|
-            | 0.1|      1.1|         null|                0.1|         null|
-            +----+---------+-------------+-------------------+-------------+
-            <BLANKLINE>
-
-        """
-        # We keep standard error when effect is given in beta, otherwise drop.
-        standard_error = f.when(
-            standard_error.isNotNull() & beta.isNotNull(), standard_error
-        ).alias("standardError")
-
-        # Odds ratio is converted to beta:
-        beta = (
-            f.when(beta.isNotNull(), beta)
-            .when(odds_ratio.isNotNull(), f.log(odds_ratio))
-            .alias("beta")
-        )
-
-        return (beta, standard_error)
-
-    @staticmethod
-    def _calculate_confidence_interval(
-        pvalue_mantissa: Column,
-        pvalue_exponent: Column,
-        beta: Column,
-        standard_error: Column,
-    ) -> tuple:
-        """This function calculates the confidence interval for the effect based on the p-value and the effect size.
-
-        If the standard error already available, don't re-calculate from p-value.
-
-        Args:
-            pvalue_mantissa (Column): p-value mantissa (float)
-            pvalue_exponent (Column): p-value exponent (integer)
-            beta (Column): effect size in beta (float)
-            standard_error (Column): standard error.
-
-        Returns:
-            tuple: betaConfidenceIntervalLower (float), betaConfidenceIntervalUpper (float)
-        """
-        # Calculate p-value from mantissa and exponent:
-        pvalue = pvalue_mantissa * f.pow(10, pvalue_exponent)
-
-        # Fix p-value underflow:
-        pvalue = f.when(pvalue == 0, sys.float_info.min).otherwise(pvalue)
-
-        # Compute missing standard error:
-        standard_error = f.when(
-            standard_error.isNull(), f.abs(beta) / f.abs(pvalue_to_zscore(pvalue))
-        ).otherwise(standard_error)
-
-        # Calculate upper and lower confidence interval:
-        ci_lower = (beta - standard_error).alias("betaConfidenceIntervalLower")
-        ci_upper = (beta + standard_error).alias("betaConfidenceIntervalUpper")
-
-        return (ci_lower, ci_upper)
-
     @classmethod
     def from_parquet(
         cls: type[SummaryStatistics], session: Session, path: str
@@ -324,25 +149,28 @@
         sumstats_df: DataFrame,
         study_id: str,
     ) -> SummaryStatistics:
-        """Create summary statistics object from summary statistics harmonized by the GWAS Catalog.
+        """Create summary statistics object from summary statistics flatfile, harmonized by the GWAS Catalog.
 
         Args:
-            sumstats_df (DataFrame): Harmonized dataset read as dataframe from GWAS Catalog.
-            study_id (str): GWAS Catalog Study accession.
+            sumstats_df (DataFrame): Harmonized dataset read as a spark dataframe from GWAS Catalog.
+            study_id (str): GWAS Catalog study accession.
 
         Returns:
             SummaryStatistics
         """
         # The effect allele frequency is an optional column, we have to test if it is there:
         allele_frequency_expression = (
-            f.col("hm_effect_allele_frequency").cast(t.DoubleType())
+            f.col("hm_effect_allele_frequency").cast(t.FloatType())
             if "hm_effect_allele_frequency" in sumstats_df.columns
             else f.lit(None)
         )
 
         # Processing columns of interest:
         processed_sumstats_df = (
-            sumstats_df.select(
+            sumstats_df
+            # Dropping rows that don't have a proper position:
+            .filter(f.col("hm_pos").cast(t.IntegerType()).isNotNull())
+            .select(
                 # Adding study identifier:
                 f.lit(study_id).cast(t.StringType()).alias("studyId"),
                 # Adding variant identifier:
@@ -350,15 +178,25 @@
                 f.col("hm_chrom").alias("chromosome"),
                 f.col("hm_pos").cast(t.IntegerType()).alias("position"),
                 # Parsing p-value mantissa and exponent:
-                *parse_pvalue(f.col("p_value").cast(t.FloatType())),
+                *parse_pvalue(f.col("p_value")),
                 # Converting/calculating effect and confidence interval:
-                *cls._convert_odds_ratio_to_beta(
+                *convert_odds_ratio_to_beta(
                     f.col("hm_beta").cast(t.DoubleType()),
                     f.col("hm_odds_ratio").cast(t.DoubleType()),
                     f.col("standard_error").cast(t.DoubleType()),
                 ),
                 allele_frequency_expression.alias("effectAlleleFrequencyFromSource"),
             )
+            # The previous select expression generated the necessary fields for calculating the confidence intervals:
+            .select(
+                "*",
+                *calculate_confidence_interval(
+                    f.col("pValueMantissa"),
+                    f.col("pValueExponent"),
+                    f.col("beta"),
+                    f.col("standardError"),
+                ),
+            )
             .repartition(200, "chromosome")
             .sortWithinPartitions("position")
         )
@@ -368,36 +206,6 @@
             _df=processed_sumstats_df,
         )
 
-    def calculate_confidence_interval(self: SummaryStatistics) -> SummaryStatistics:
-        """A Function to add upper and lower confidence interval to a summary statistics dataset.
-
-        Returns:
-            SummaryStatistics:
-        """
-        columns = self._df.columns
-
-        # If confidence interval has already been calculated skip:
-        if (
-            "betaConfidenceIntervalLower" in columns
-            and "betaConfidenceIntervalUpper" in columns
-        ):
-            return self
-
-        # Calculate CI:
-        return SummaryStatistics(
-            _df=(
-                self._df.select(
-                    "*",
-                    *self._calculate_confidence_interval(
-                        f.col("pValueMantissa"),
-                        f.col("pValueExponent"),
-                        f.col("beta"),
-                        f.col("standardError"),
-                    ),
-                )
-            )
-        )
-
     def pvalue_filter(self: SummaryStatistics, pvalue: float) -> SummaryStatistics:
         """Filter summary statistics based on the provided p-value threshold.
 
@@ -431,136 +239,95 @@
         """
         # Calculate distance-based clumping:
         return WindowBasedClumping.clump(self, distance)
-
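The refactored class above can be exercised end to end: read a harmonized flat file, build the dataset, filter to significant hits, and clump. The following is a minimal usage sketch, not taken from the pipeline itself; the Session import path, its constructor, and the input path and format are assumptions.

# Minimal usage sketch; import paths, Session() construction and the input
# path/format are assumed rather than documented here.
from otg.common.session import Session  # assumed module path
from otg.dataset.summary_statistics import SummaryStatistics

session = Session()  # assumed default constructor

# Harmonized GWAS Catalog summary statistics read as a Spark DataFrame
# (a tab-separated flat file is an assumption about the input format):
sumstats_df = session.spark.read.csv(
    "gs://my-bucket/GCST000001.h.tsv.gz",  # hypothetical path
    sep="\t",
    header=True,
)

sumstats = SummaryStatistics.from_gwas_harmonized_summary_stats(
    sumstats_df, study_id="GCST000001"
)
significant = sumstats.pvalue_filter(5e-8)  # keep genome-wide significant hits
study_loci = significant.window_based_clumping(250000)  # distance-based clumping in base pairs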

calculate_confidence_interval()

A function to add upper and lower confidence intervals to a summary statistics dataset.

Returns:

Name Type Description
SummaryStatistics SummaryStatistics
Source code in src/otg/dataset/summary_statistics.py
def calculate_confidence_interval(self: SummaryStatistics) -> SummaryStatistics:
-    """A Function to add upper and lower confidence interval to a summary statistics dataset.
-
-    Returns:
-        SummaryStatistics:
-    """
-    columns = self._df.columns
-
-    # If confidence interval has already been calculated skip:
-    if (
-        "betaConfidenceIntervalLower" in columns
-        and "betaConfidenceIntervalUpper" in columns
-    ):
-        return self
-
-    # Calculate CI:
-    return SummaryStatistics(
-        _df=(
-            self._df.select(
-                "*",
-                *self._calculate_confidence_interval(
-                    f.col("pValueMantissa"),
-                    f.col("pValueExponent"),
-                    f.col("beta"),
-                    f.col("standardError"),
-                ),
-            )
-        )
-    )
-

from_gwas_harmonized_summary_stats(sumstats_df, study_id) classmethod

Create summary statistics object from summary statistics harmonized by the GWAS Catalog.

Parameters:

Name Type Description Default
sumstats_df DataFrame

Harmonized dataset read as dataframe from GWAS Catalog.

required
study_id str

GWAS Catalog Study accession.

required

Returns:

Type Description
SummaryStatistics

SummaryStatistics

Source code in src/otg/dataset/summary_statistics.py

from_gwas_harmonized_summary_stats(sumstats_df, study_id) classmethod

Create summary statistics object from summary statistics flatfile, harmonized by the GWAS Catalog.

Parameters:

Name Type Description Default
sumstats_df DataFrame

Harmonized dataset read as a spark dataframe from GWAS Catalog.

required
study_id str

GWAS Catalog study accession.

required

Returns:

Type Description
SummaryStatistics

SummaryStatistics

Source code in src/otg/dataset/summary_statistics.py
@classmethod
 def from_gwas_harmonized_summary_stats(
     cls: type[SummaryStatistics],
     sumstats_df: DataFrame,
     study_id: str,
 ) -> SummaryStatistics:
-    """Create summary statistics object from summary statistics harmonized by the GWAS Catalog.
+    """Create summary statistics object from summary statistics flatfile, harmonized by the GWAS Catalog.
 
     Args:
-        sumstats_df (DataFrame): Harmonized dataset read as dataframe from GWAS Catalog.
-        study_id (str): GWAS Catalog Study accession.
+        sumstats_df (DataFrame): Harmonized dataset read as a spark dataframe from GWAS Catalog.
+        study_id (str): GWAS Catalog study accession.
 
     Returns:
         SummaryStatistics
     """
     # The effect allele frequency is an optional column, we have to test if it is there:
     allele_frequency_expression = (
-        f.col("hm_effect_allele_frequency").cast(t.DoubleType())
+        f.col("hm_effect_allele_frequency").cast(t.FloatType())
         if "hm_effect_allele_frequency" in sumstats_df.columns
         else f.lit(None)
     )
 
     # Processing columns of interest:
     processed_sumstats_df = (
-        sumstats_df.select(
+        sumstats_df
+        # Dropping rows that don't have a proper position:
+        .filter(f.col("hm_pos").cast(t.IntegerType()).isNotNull())
+        .select(
             # Adding study identifier:
             f.lit(study_id).cast(t.StringType()).alias("studyId"),
             # Adding variant identifier:
@@ -568,15 +335,25 @@
             f.col("hm_chrom").alias("chromosome"),
             f.col("hm_pos").cast(t.IntegerType()).alias("position"),
             # Parsing p-value mantissa and exponent:
-            *parse_pvalue(f.col("p_value").cast(t.FloatType())),
+            *parse_pvalue(f.col("p_value")),
             # Converting/calculating effect and confidence interval:
-            *cls._convert_odds_ratio_to_beta(
+            *convert_odds_ratio_to_beta(
                 f.col("hm_beta").cast(t.DoubleType()),
                 f.col("hm_odds_ratio").cast(t.DoubleType()),
                 f.col("standard_error").cast(t.DoubleType()),
             ),
             allele_frequency_expression.alias("effectAlleleFrequencyFromSource"),
         )
+        # The previous select expression generated the necessary fields for calculating the confidence intervals:
+        .select(
+            "*",
+            *calculate_confidence_interval(
+                f.col("pValueMantissa"),
+                f.col("pValueExponent"),
+                f.col("beta"),
+                f.col("standardError"),
+            ),
+        )
         .repartition(200, "chromosome")
         .sortWithinPartitions("position")
     )
@@ -585,21 +362,21 @@
     return cls(
         _df=processed_sumstats_df,
     )
-


from_parquet(session, path) classmethod

Initialise SummaryStatistics from parquet file.

Parameters:

Name Type Description Default
session Session

Session

required
path str

Path to parquet file

required

Returns:

Name Type Description
SummaryStatistics SummaryStatistics

SummaryStatistics dataset

Source code in src/otg/dataset/summary_statistics.py
@classmethod
 def from_parquet(
     cls: type[SummaryStatistics], session: Session, path: str
 ) -> SummaryStatistics:
@@ -614,27 +391,27 @@
     """
     df = session.read_parquet(path=path, schema=cls._schema)
     return cls(_df=df, _schema=cls._schema)
-


pvalue_filter(pvalue)

Filter summary statistics based on the provided p-value threshold.

Parameters:

Name Type Description Default
pvalue float

upper limit of the p-value to be filtered upon.

required

Returns:

Name Type Description
SummaryStatistics SummaryStatistics

summary statistics object containing single point associations with p-values at least as significant as the provided threshold.

Source code in src/otg/dataset/summary_statistics.py
def pvalue_filter(self: SummaryStatistics, pvalue: float) -> SummaryStatistics:
     """Filter summary statistics based on the provided p-value threshold.
 
     Args:
@@ -655,17 +432,17 @@
         )
     )
     return SummaryStatistics(_df=df)
-


window_based_clumping(distance)

Perform distance-based clumping.

Parameters:

Name Type Description Default
distance int

Distance in base pairs

required

Returns:

Name Type Description
StudyLocus StudyLocus

StudyLocus object

Source code in src/otg/dataset/summary_statistics.py
def window_based_clumping(self: SummaryStatistics, distance: int) -> StudyLocus:
     """Perform distance-based clumping.
 
     Args:
@@ -686,6 +463,6 @@
  |-- betaConfidenceIntervalUpper: double (nullable = true)
  |-- pValueMantissa: float (nullable = false)
  |-- pValueExponent: integer (nullable = false)
- |-- effectAlleleFrequencyFromSource: double (nullable = true)
+ |-- effectAlleleFrequencyFromSource: float (nullable = true)
  |-- standardError: double (nullable = true)
 

\ No newline at end of file diff --git a/objects.inv b/objects.inv index 836b896d1691d87af737cfafd624bf55b9b5eedf..6dcc88f9d9d875a1c7a0be46a0ec9dd95b08c3f4 100644 GIT binary patch (binary delta omitted) diff --git a/search/search_index.json b/search/search_index.json
Ingestion and analysis of genetic and functional genomic data for the identification and prioritisation of drug targets.

This project is still in an experimental phase. Please refer to the roadmap section for more information.

For information on how to configure the development environment, run the code, or contribute changes, see the contributing section. For known technical issues and solutions to them, see the troubleshooting section.

"},{"location":"contributing/","title":"Environment configuration and contributing changes","text":""},{"location":"contributing/#one-time-configuration","title":"One-time configuration","text":"

The steps in this section only ever need to be done once on any particular system.

Google Cloud configuration: 1. Install Google Cloud SDK: https://cloud.google.com/sdk/docs/install. 2. Log in to your work Google Account: run gcloud auth login and follow the instructions. 3. Obtain Google application credentials: run gcloud auth application-default login and follow the instructions.

Check that you have the make utility installed, and if not (which is unlikely), install it using your system package manager.

Check that you have java installed.

"},{"location":"contributing/#environment-configuration","title":"Environment configuration","text":"

Run make setup-dev to install/update the necessary packages and activate the development environment. You need to do this every time you open a new shell.

It is recommended to use VS Code as an IDE for development.

"},{"location":"contributing/#how-to-run-the-code","title":"How to run the code","text":"

All pipelines in this repository are intended to be run in Google Dataproc. Running them locally is not currently supported.

In order to run the code:

  1. Manually edit your local workflow/dag.yaml file and comment out the steps you do not want to run.

  2. Manually edit your local pyproject.toml file and modify the version of the code.

    • This must be different from the version used by any other people working on the repository to avoid any deployment conflicts, so it's a good idea to use your name, for example: 1.2.3+jdoe.
    • You can also add a brief branch description, for example: 1.2.3+jdoe.myfeature.
    • Note that the version must comply with PEP440 conventions, otherwise Poetry will not allow it to be deployed.
    • Do not use underscores or hyphens in your version name. When building the WHL file, they will be automatically converted to dots, which means the file name will no longer match the version and the build will fail. Use dots instead.
  3. Run make build.

    • This will create a bundle containing the necessary code, configuration and dependencies to run the ETL pipeline, and then upload this bundle to Google Cloud.
    • A version-specific subpath is used, so uploading the code will not affect any branches but your own.
    • If there was already a code bundle uploaded with the same version number, it will be replaced.
  4. Submit the Dataproc job with poetry run python workflow/workflow_template.py

    • You will need to specify additional parameters, some are mandatory and some are optional. Run with --help to see usage.
    • The script will provision the cluster and submit the job.
    • The cluster will take a few minutes to get provisioned and running, during which the script will not output anything; this is normal.
    • Once submitted, you can monitor the progress of your job on this page: https://console.cloud.google.com/dataproc/jobs?project=open-targets-genetics-dev.
    • On completion (whether successful or a failure), the cluster will be automatically removed, so you don't have to worry about shutting it down to avoid incurring charges.
"},{"location":"contributing/#how-to-generate-a-local-copy-of-the-documentation","title":"How to generate a local copy of the documentation","text":"

Run poetry run mkdocs serve. This will generate the local copy of the documentation and will start a local server to browse it (URL will be printed, usually http://127.0.0.1:8000/).

"},{"location":"contributing/#how-to-run-the-tests","title":"How to run the tests","text":"

Run poetry run pytest.

"},{"location":"contributing/#contributing-checklist","title":"Contributing checklist","text":"

When making changes, and especially when implementing a new module or feature, it's essential to ensure that all relevant sections of the code base are modified.

"},{"location":"contributing/#documentation","title":"Documentation","text":"
  • If during development you had a question which wasn't covered in the documentation, and someone explained it to you, add it to the documentation. The same applies if you encountered any instructions in the documentation which were obsolete or incorrect.
  • Documentation autogeneration expressions start with :::. They will automatically generate sections of the documentation based on class and method docstrings (an example expression is shown after this list). Be sure to update them for:
  • Dataset definitions in docs/reference/dataset (example: docs/reference/dataset/study_index/study_index_finngen.md)
  • Step definitions in docs/reference/step (example: docs/reference/step/finngen.md)
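For example (hypothetical identifier, following the layout above), a dataset page such as docs/reference/dataset/study_index/study_index_finngen.md would contain an autogeneration expression like ::: otg.dataset.study_index.StudyIndexFinnGen on a line of its own, and the documentation build replaces it with sections generated from that class's docstrings.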
"},{"location":"contributing/#configuration","title":"Configuration","text":"
  • Input and output paths in config/datasets/gcp.yaml
  • Step configuration in config/step/my_STEP.yaml (example: config/step/my_finngen.yaml)
"},{"location":"contributing/#classes","title":"Classes","text":"
  • Step configuration class in src/otg/config.py (example: FinnGenStepConfig class in that module)
  • Dataset class in src/otg/dataset/ (example: src/otg/dataset/study_index.py → StudyIndexFinnGen)
  • Step main running class in src/otg/STEP.py (example: src/otg/finngen.py)
"},{"location":"contributing/#tests","title":"Tests","text":"
  • Test study fixture in tests/conftest.py (example: mock_study_index_finngen in that module)
  • Test sample data in tests/data_samples (example: tests/data_samples/finngen_studies_sample.json)
  • Test definition in tests/ (example: tests/dataset/test_study_index.py → test_study_index_finngen_creation)
"},{"location":"roadmap/","title":"Roadmap","text":"

The Open Targets core team is working on refactoring Open Targets Genetics, aiming to:

  • Re-focus the product around Target ID
  • Create a gold standard toolkit for post-GWAS analysis
  • Enable faster and more robust addition of new datasets and datatypes
  • Reduce computational and financial cost

See here for a list of open issues for this project.

Schematic diagram representing the drafted process:

"},{"location":"troubleshooting/","title":"Troubleshooting","text":""},{"location":"troubleshooting/#blaslapack","title":"BLAS/LAPACK","text":"

If you see errors related to BLAS/LAPACK libraries, see this StackOverflow post for guidance.

"},{"location":"troubleshooting/#pyenv-and-poetry","title":"Pyenv and Poetry","text":"

Errors thrown by Pyenv or Poetry can be hard to diagnose and resolve precisely. In such cases, it often helps to remove those tools from the system completely. Follow these steps:

  1. Close your currently activated environment, if any: exit
  2. Uninstall Poetry: curl -sSL https://install.python-poetry.org | python3 - --uninstall
  3. Clear Poetry cache: rm -rf ~/.cache/pypoetry
  4. Clear pre-commit cache: rm -rf ~/.cache/pre-commit
  5. Switch to system Python shell: pyenv shell system
  6. Edit ~/.bashrc to remove the lines related to Pyenv configuration
  7. Remove Pyenv configuration and cache: rm -rf ~/.pyenv

After that, open a fresh shell session and run make setup-dev again.

"},{"location":"troubleshooting/#java","title":"Java","text":"

Officially, PySpark requires Java version 8 (a.k.a. 1.8) or above to work. However, if you have a very recent version of Java, you may experience issues, as it may introduce breaking changes that PySpark hasn't had time to integrate. For example, as of May 2023, PySpark did not work with Java 20.

If you are encountering problems with initialising a Spark session, try using Java 11.

"},{"location":"troubleshooting/#pre-commit","title":"Pre-commit","text":"

If you see an error message thrown by pre-commit, which looks like this (SyntaxError: Unexpected token '?'), followed by a JavaScript traceback, the issue is likely with your system NodeJS version.

One solution which can help in this case is to upgrade your system NodeJS version. However, this may not always be possible. For example, the Ubuntu repository is several major versions behind the latest release as of July 2023.

Another solution which helps is to remove Node, NodeJS, and npm from your system entirely. In this case, pre-commit will not try to rely on a system version of NodeJS and will install its own, suitable one.

On Ubuntu, this can be done using sudo apt remove node nodejs npm, followed by sudo apt autoremove. But in some cases, depending on your existing installation, you may need to also manually remove some files. See this StackOverflow answer for guidance.

After running these commands, you are advised to open a fresh shell, and then also reinstall Pyenv and Poetry to make sure they pick up the changes (see relevant section above).

"},{"location":"components/dataset/_dataset/","title":"Dataset","text":"

Open Targets Genetics Dataset.

Dataset is a wrapper around a Spark DataFrame with a predefined schema. Schemas for each child dataset are described in the json.schemas module.

Source code in src/otg/dataset/dataset.py
@dataclass\nclass Dataset:\n\"\"\"Open Targets Genetics Dataset.\n\n    `Dataset` is a wrapper around a Spark DataFrame with a predefined schema. Schemas for each child dataset are described in the `json.schemas` module.\n    \"\"\"\n\n    _df: DataFrame\n    _schema: StructType\n\n    def __post_init__(self: Dataset) -> None:\n\"\"\"Post init.\"\"\"\n        self.validate_schema()\n\n    @property\n    def df(self: Dataset) -> DataFrame:\n\"\"\"Dataframe included in the Dataset.\"\"\"\n        return self._df\n\n    @df.setter\n    def df(self: Dataset, new_df: DataFrame) -> None:  # noqa: CCE001\n\"\"\"Dataframe setter.\"\"\"\n        self._df = new_df\n        self.validate_schema()\n\n    @property\n    def schema(self: Dataset) -> StructType:\n\"\"\"Dataframe expected schema.\"\"\"\n        return self._schema\n\n    @classmethod\n    def from_parquet(\n        cls: type[Dataset], session: Session, path: str, schema: StructType\n    ) -> Dataset:\n\"\"\"Reads a parquet file into a Dataset with a given schema.\n\n        Args:\n            session (Session): ETL session\n            path (str): Path to parquet file\n            schema (StructType): Schema to use\n\n        Returns:\n            Dataset: Dataset with given schema\n        \"\"\"\n        df = session.read_parquet(path=path, schema=schema)\n        return cls(_df=df, _schema=schema)\n\n    def validate_schema(self: Dataset) -> None:  # sourcery skip: invert-any-all\n\"\"\"Validate DataFrame schema against expected class schema.\n\n        Raises:\n            ValueError: DataFrame schema is not valid\n        \"\"\"\n        expected_schema = self._schema\n        expected_fields = flatten_schema(expected_schema)\n        observed_schema = self._df.schema\n        observed_fields = flatten_schema(observed_schema)\n\n        # Unexpected fields in dataset\n        if unexpected_struct_fields := [\n            x for x in observed_fields if x not in expected_fields\n        ]:\n            raise ValueError(\n                f\"The {unexpected_struct_fields} fields are not included in DataFrame schema: {expected_fields}\"\n            )\n\n        # Required fields not in dataset\n        required_fields = [x.name for x in expected_schema if not x.nullable]\n        if missing_required_fields := [\n            req\n            for req in required_fields\n            if not any(field.name == req for field in observed_fields)\n        ]:\n            raise ValueError(\n                f\"The {missing_required_fields} fields are required but missing: {required_fields}\"\n            )\n\n        # Fields with duplicated names\n        if duplicated_fields := [\n            x for x in set(observed_fields) if observed_fields.count(x) > 1\n        ]:\n            raise ValueError(\n                f\"The following fields are duplicated in DataFrame schema: {duplicated_fields}\"\n            )\n\n        # Fields with different datatype\n        if fields_with_different_observed_datatype := [\n            field\n            for field in set(observed_fields)\n            if observed_fields.count(field) != expected_fields.count(field)\n        ]:\n            raise ValueError(\n                f\"The following fields present differences in their datatypes: {fields_with_different_observed_datatype}.\"\n            )\n
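To make the wrapper pattern above concrete, here is a minimal sketch of a child dataset. The class name and schema file name are hypothetical, and the import path of parse_spark_schema is an assumption; the pattern itself mirrors the child classes documented below.

# Minimal sketch of a child dataset; names marked hypothetical are not part of the codebase.
from dataclasses import dataclass

from pyspark.sql.types import StructType

from otg.common.schemas import parse_spark_schema  # assumed import path for the helper
from otg.dataset.dataset import Dataset  # assumed import path


@dataclass
class MyDataset(Dataset):  # hypothetical child dataset
    """Declaring the expected schema is enough: __post_init__ validates any DataFrame assigned to it."""

    _schema: StructType = parse_spark_schema("my_dataset.json")  # hypothetical schema file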
"},{"location":"components/dataset/_dataset/#otg.dataset.dataset.Dataset.df","title":"df: DataFrame property writable","text":"

Dataframe included in the Dataset.

"},{"location":"components/dataset/_dataset/#otg.dataset.dataset.Dataset.schema","title":"schema: StructType property","text":"

Dataframe expected schema.

"},{"location":"components/dataset/_dataset/#otg.dataset.dataset.Dataset.__post_init__","title":"__post_init__()","text":"

Post init.

Source code in src/otg/dataset/dataset.py
def __post_init__(self: Dataset) -> None:\n\"\"\"Post init.\"\"\"\n    self.validate_schema()\n
"},{"location":"components/dataset/_dataset/#otg.dataset.dataset.Dataset.from_parquet","title":"from_parquet(session, path, schema) classmethod","text":"

Reads a parquet file into a Dataset with a given schema.

Parameters:

Name Type Description Default session Session

ETL session

required path str

Path to parquet file

required schema StructType

Schema to use

required

Returns:

Name Type Description Dataset Dataset

Dataset with given schema

Source code in src/otg/dataset/dataset.py
@classmethod\ndef from_parquet(\n    cls: type[Dataset], session: Session, path: str, schema: StructType\n) -> Dataset:\n\"\"\"Reads a parquet file into a Dataset with a given schema.\n\n    Args:\n        session (Session): ETL session\n        path (str): Path to parquet file\n        schema (StructType): Schema to use\n\n    Returns:\n        Dataset: Dataset with given schema\n    \"\"\"\n    df = session.read_parquet(path=path, schema=schema)\n    return cls(_df=df, _schema=schema)\n
"},{"location":"components/dataset/_dataset/#otg.dataset.dataset.Dataset.validate_schema","title":"validate_schema()","text":"

Validate DataFrame schema against expected class schema.

Raises:

Type Description ValueError

DataFrame schema is not valid

Source code in src/otg/dataset/dataset.py
def validate_schema(self: Dataset) -> None:  # sourcery skip: invert-any-all\n\"\"\"Validate DataFrame schema against expected class schema.\n\n    Raises:\n        ValueError: DataFrame schema is not valid\n    \"\"\"\n    expected_schema = self._schema\n    expected_fields = flatten_schema(expected_schema)\n    observed_schema = self._df.schema\n    observed_fields = flatten_schema(observed_schema)\n\n    # Unexpected fields in dataset\n    if unexpected_struct_fields := [\n        x for x in observed_fields if x not in expected_fields\n    ]:\n        raise ValueError(\n            f\"The {unexpected_struct_fields} fields are not included in DataFrame schema: {expected_fields}\"\n        )\n\n    # Required fields not in dataset\n    required_fields = [x.name for x in expected_schema if not x.nullable]\n    if missing_required_fields := [\n        req\n        for req in required_fields\n        if not any(field.name == req for field in observed_fields)\n    ]:\n        raise ValueError(\n            f\"The {missing_required_fields} fields are required but missing: {required_fields}\"\n        )\n\n    # Fields with duplicated names\n    if duplicated_fields := [\n        x for x in set(observed_fields) if observed_fields.count(x) > 1\n    ]:\n        raise ValueError(\n            f\"The following fields are duplicated in DataFrame schema: {duplicated_fields}\"\n        )\n\n    # Fields with different datatype\n    if fields_with_different_observed_datatype := [\n        field\n        for field in set(observed_fields)\n        if observed_fields.count(field) != expected_fields.count(field)\n    ]:\n        raise ValueError(\n            f\"The following fields present differences in their datatypes: {fields_with_different_observed_datatype}.\"\n        )\n
"},{"location":"components/dataset/colocalisation/","title":"Colocalisation","text":"

Bases: Dataset

Colocalisation results for pairs of overlapping study-locus.

Source code in src/otg/dataset/colocalisation.py
@dataclass\nclass Colocalisation(Dataset):\n\"\"\"Colocalisation results for pairs of overlapping study-locus.\"\"\"\n\n    _schema: StructType = parse_spark_schema(\"colocalisation.json\")\n\n    @classmethod\n    def from_parquet(\n        cls: type[Colocalisation], session: Session, path: str\n    ) -> Colocalisation:\n\"\"\"Initialise Colocalisation dataset from parquet file.\n\n        Args:\n            session (Session): ETL session\n            path (str): Path to parquet file\n\n        Returns:\n            Colocalisation: Colocalisation results\n        \"\"\"\n        df = session.read_parquet(path=path, schema=cls._schema)\n        return cls(_df=df, _schema=cls._schema)\n
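A short usage sketch for the loader above; the Session import path, its constructor and the parquet path are assumptions rather than documented values.

from otg.common.session import Session  # assumed module path
from otg.dataset.colocalisation import Colocalisation

session = Session()  # assumed default constructor
coloc = Colocalisation.from_parquet(session, "gs://my-bucket/colocalisation/")  # hypothetical path
coloc.df.printSchema()  # the wrapped Spark DataFrame is exposed through the df property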
"},{"location":"components/dataset/colocalisation/#otg.dataset.colocalisation.Colocalisation.from_parquet","title":"from_parquet(session, path) classmethod","text":"

Initialise Colocalisation dataset from parquet file.

Parameters:

Name Type Description Default session Session

ETL session

required path str

Path to parquet file

required

Returns:

Name Type Description Colocalisation Colocalisation

Colocalisation results

Source code in src/otg/dataset/colocalisation.py
@classmethod\ndef from_parquet(\n    cls: type[Colocalisation], session: Session, path: str\n) -> Colocalisation:\n\"\"\"Initialise Colocalisation dataset from parquet file.\n\n    Args:\n        session (Session): ETL session\n        path (str): Path to parquet file\n\n    Returns:\n        Colocalisation: Colocalisation results\n    \"\"\"\n    df = session.read_parquet(path=path, schema=cls._schema)\n    return cls(_df=df, _schema=cls._schema)\n
"},{"location":"components/dataset/colocalisation/#schema","title":"Schema","text":"
root\n |-- left_studyLocusId: long (nullable = false)\n |-- right_studyLocusId: long (nullable = false)\n |-- chromosome: string (nullable = false)\n |-- colocalisationMethod: string (nullable = false)\n |-- coloc_n_vars: long (nullable = false)\n |-- coloc_h0: double (nullable = true)\n |-- coloc_h1: double (nullable = true)\n |-- coloc_h2: double (nullable = true)\n |-- coloc_h3: double (nullable = true)\n |-- coloc_h4: double (nullable = true)\n |-- coloc_log2_h4_h3: double (nullable = true)\n |-- clpp: double (nullable = true)\n
"},{"location":"components/dataset/gene_index/","title":"Gene index","text":"

Bases: Dataset

Gene index dataset.

Gene-based annotation.

Source code in src/otg/dataset/gene_index.py
@dataclass\nclass GeneIndex(Dataset):\n\"\"\"Gene index dataset.\n\n    Gene-based annotation.\n    \"\"\"\n\n    _schema: StructType = parse_spark_schema(\"targets.json\")\n\n    @staticmethod\n    def _get_gene_tss(strand_col: Column, start_col: Column, end_col: Column) -> Column:\n\"\"\"Returns the TSS of a gene based on its orientation.\n\n        Args:\n            strand_col (Column): Column containing 1 if the coding strand of the gene is forward, and -1 if it is reverse.\n            start_col (Column): Column containing the start position of the gene.\n            end_col (Column): Column containing the end position of the gene.\n\n        Returns:\n            Column: Column containing the TSS of the gene.\n\n        Examples:\n            >>> df = spark.createDataFrame([{\"strand\": 1, \"start\": 100, \"end\": 200}, {\"strand\": -1, \"start\": 100, \"end\": 200}])\n            >>> df.withColumn(\"tss\", GeneIndex._get_gene_tss(f.col(\"strand\"), f.col(\"start\"), f.col(\"end\"))).show()\n            +---+-----+------+---+\n            |end|start|strand|tss|\n            +---+-----+------+---+\n            |200|  100|     1|100|\n            |200|  100|    -1|200|\n            +---+-----+------+---+\n            <BLANKLINE>\n\n        \"\"\"\n        return f.when(strand_col == 1, start_col).when(strand_col == -1, end_col)\n\n    @classmethod\n    def from_source(cls: type[GeneIndex], target_index: DataFrame) -> GeneIndex:\n\"\"\"Initialise GeneIndex from source dataset.\n\n        Args:\n            target_index (DataFrame): Target index dataframe\n\n        Returns:\n            GeneIndex: Gene index dataset\n        \"\"\"\n        return cls(\n            _df=target_index.select(\n                f.coalesce(f.col(\"id\"), f.lit(\"unknown\")).alias(\"geneId\"),\n                f.coalesce(f.col(\"genomicLocation.chromosome\"), f.lit(\"unknown\")).alias(\n                    \"chromosome\"\n                ),\n                GeneIndex._get_gene_tss(\n                    f.col(\"genomicLocation.strand\"),\n                    f.col(\"genomicLocation.start\"),\n                    f.col(\"genomicLocation.end\"),\n                ).alias(\"tss\"),\n                \"biotype\",\n                \"approvedSymbol\",\n                \"obsoleteSymbols\",\n            )\n        )\n\n    @classmethod\n    def from_parquet(cls: type[GeneIndex], session: Session, path: str) -> GeneIndex:\n\"\"\"Initialise GeneIndex from parquet file.\n\n        Args:\n            session (Session): ETL session\n            path (str): Path to parquet file\n\n        Returns:\n            GeneIndex: Gene index dataset\n        \"\"\"\n        df = session.read_parquet(path=path, schema=cls._schema)\n        return cls(_df=df, _schema=cls._schema)\n\n    def filter_by_biotypes(self: GeneIndex, biotypes: list) -> GeneIndex:\n\"\"\"Filter by approved biotypes.\n\n        Args:\n            biotypes (list): List of Ensembl biotypes to keep.\n\n        Returns:\n            GeneIndex: Gene index dataset filtered by biotypes.\n        \"\"\"\n        self.df = self._df.filter(f.col(\"biotype\").isin(biotypes))\n        return self\n\n    def locations_lut(self: GeneIndex) -> DataFrame:\n\"\"\"Gene location information.\n\n        Returns:\n            DataFrame: Gene LUT including genomic location information.\n        \"\"\"\n        return self.df.select(\n            \"geneId\",\n            \"chromosome\",\n            \"tss\",\n        )\n\n    def symbols_lut(self: GeneIndex) -> DataFrame:\n\"\"\"Gene 
symbol lookup table.\n\n        Pre-processess gene/target dataset to create lookup table of gene symbols, including\n        obsoleted gene symbols.\n\n        Returns:\n            DataFrame: Gene LUT for symbol mapping containing `geneId` and `geneSymbol` columns.\n        \"\"\"\n        return self.df.select(\n            \"geneId\",\n            f.explode(\n                f.array_union(f.array(\"approvedSymbol\"), f.col(\"obsoleteSymbols.label\"))\n            ).alias(\"geneSymbol\"),\n        )\n
"},{"location":"components/dataset/gene_index/#otg.dataset.gene_index.GeneIndex.filter_by_biotypes","title":"filter_by_biotypes(biotypes)","text":"

Filter by approved biotypes.

Parameters:

Name Type Description Default biotypes list

List of Ensembl biotypes to keep.

required

Returns:

Name Type Description GeneIndex GeneIndex

Gene index dataset filtered by biotypes.

Source code in src/otg/dataset/gene_index.py
def filter_by_biotypes(self: GeneIndex, biotypes: list) -> GeneIndex:\n\"\"\"Filter by approved biotypes.\n\n    Args:\n        biotypes (list): List of Ensembl biotypes to keep.\n\n    Returns:\n        GeneIndex: Gene index dataset filtered by biotypes.\n    \"\"\"\n    self.df = self._df.filter(f.col(\"biotype\").isin(biotypes))\n    return self\n
"},{"location":"components/dataset/gene_index/#otg.dataset.gene_index.GeneIndex.from_parquet","title":"from_parquet(session, path) classmethod","text":"

Initialise GeneIndex from parquet file.

Parameters:

Name Type Description Default session Session

ETL session

required path str

Path to parquet file

required

Returns:

Name Type Description GeneIndex GeneIndex

Gene index dataset

Source code in src/otg/dataset/gene_index.py
@classmethod\ndef from_parquet(cls: type[GeneIndex], session: Session, path: str) -> GeneIndex:\n\"\"\"Initialise GeneIndex from parquet file.\n\n    Args:\n        session (Session): ETL session\n        path (str): Path to parquet file\n\n    Returns:\n        GeneIndex: Gene index dataset\n    \"\"\"\n    df = session.read_parquet(path=path, schema=cls._schema)\n    return cls(_df=df, _schema=cls._schema)\n
"},{"location":"components/dataset/gene_index/#otg.dataset.gene_index.GeneIndex.from_source","title":"from_source(target_index) classmethod","text":"

Initialise GeneIndex from source dataset.

Parameters:

Name Type Description Default target_index DataFrame

Target index dataframe

required

Returns:

Name Type Description GeneIndex GeneIndex

Gene index dataset

Source code in src/otg/dataset/gene_index.py
@classmethod\ndef from_source(cls: type[GeneIndex], target_index: DataFrame) -> GeneIndex:\n\"\"\"Initialise GeneIndex from source dataset.\n\n    Args:\n        target_index (DataFrame): Target index dataframe\n\n    Returns:\n        GeneIndex: Gene index dataset\n    \"\"\"\n    return cls(\n        _df=target_index.select(\n            f.coalesce(f.col(\"id\"), f.lit(\"unknown\")).alias(\"geneId\"),\n            f.coalesce(f.col(\"genomicLocation.chromosome\"), f.lit(\"unknown\")).alias(\n                \"chromosome\"\n            ),\n            GeneIndex._get_gene_tss(\n                f.col(\"genomicLocation.strand\"),\n                f.col(\"genomicLocation.start\"),\n                f.col(\"genomicLocation.end\"),\n            ).alias(\"tss\"),\n            \"biotype\",\n            \"approvedSymbol\",\n            \"obsoleteSymbols\",\n        )\n    )\n
"},{"location":"components/dataset/gene_index/#otg.dataset.gene_index.GeneIndex.locations_lut","title":"locations_lut()","text":"

Gene location information.

Returns:

Name Type Description DataFrame DataFrame

Gene LUT including genomic location information.

Source code in src/otg/dataset/gene_index.py
def locations_lut(self: GeneIndex) -> DataFrame:\n\"\"\"Gene location information.\n\n    Returns:\n        DataFrame: Gene LUT including genomic location information.\n    \"\"\"\n    return self.df.select(\n        \"geneId\",\n        \"chromosome\",\n        \"tss\",\n    )\n
"},{"location":"components/dataset/gene_index/#otg.dataset.gene_index.GeneIndex.symbols_lut","title":"symbols_lut()","text":"

Gene symbol lookup table.

Pre-processes the gene/target dataset to create a lookup table of gene symbols, including obsolete gene symbols.

Returns:

Name Type Description DataFrame DataFrame

Gene LUT for symbol mapping containing geneId and geneSymbol columns.

Source code in src/otg/dataset/gene_index.py
def symbols_lut(self: GeneIndex) -> DataFrame:\n\"\"\"Gene symbol lookup table.\n\n    Pre-processess gene/target dataset to create lookup table of gene symbols, including\n    obsoleted gene symbols.\n\n    Returns:\n        DataFrame: Gene LUT for symbol mapping containing `geneId` and `geneSymbol` columns.\n    \"\"\"\n    return self.df.select(\n        \"geneId\",\n        f.explode(\n            f.array_union(f.array(\"approvedSymbol\"), f.col(\"obsoleteSymbols.label\"))\n        ).alias(\"geneSymbol\"),\n    )\n
"},{"location":"components/dataset/gene_index/#schema","title":"Schema","text":"
root\n |-- geneId: string (nullable = false)\n |-- chromosome: string (nullable = false)\n |-- approvedSymbol: string (nullable = true)\n |-- biotype: string (nullable = true)\n |-- approvedName: string (nullable = true)\n |-- obsoleteSymbols: array (nullable = true)\n |    |-- element: struct (containsNull = true)\n |    |    |-- label: string (nullable = true)\n |    |    |-- source: string (nullable = true)\n |-- tss: long (nullable = true)\n
"},{"location":"components/dataset/intervals/","title":"Intervals","text":"

Bases: Dataset

Intervals dataset links genes to genomic regions based on genome interaction studies.

Source code in src/otg/dataset/intervals.py
@dataclass\nclass Intervals(Dataset):\n\"\"\"Intervals dataset links genes to genomic regions based on genome interaction studies.\"\"\"\n\n    _schema: StructType = parse_spark_schema(\"intervals.json\")\n\n    @classmethod\n    def from_parquet(cls: type[Intervals], session: Session, path: str) -> Intervals:\n\"\"\"Initialise Intervals from parquet file.\n\n        Args:\n            session (Session): ETL session\n            path (str): Path to parquet file\n\n        Returns:\n            Intervals: Intervals dataset\n        \"\"\"\n        df = session.read_parquet(path=path, schema=cls._schema)\n        return cls(_df=df, _schema=cls._schema)\n\n    @classmethod\n    def parse_andersson(\n        cls: type[Intervals],\n        session: Session,\n        path: str,\n        gene_index: GeneIndex,\n        lift: LiftOverSpark,\n    ) -> Intervals:\n\"\"\"Parse Andersson et al. 2014 dataset.\n\n        Args:\n            session (Session): session\n            path (str): Path to dataset\n            gene_index (GeneIndex): Gene index\n            lift (LiftOverSpark): LiftOverSpark instance\n\n        Returns:\n            Intervals: Intervals dataset\n        \"\"\"\n        # Constant values:\n        dataset_name = \"andersson2014\"\n        experiment_type = \"fantom5\"\n        pmid = \"24670763\"\n        bio_feature = \"aggregate\"\n        twosided_threshold = 2.45e6  # <-  this needs to phased out. Filter by percentile instead of absolute value.\n\n        session.logger.info(\"Parsing Andersson 2014 data...\")\n        session.logger.info(f\"Reading data from {path}\")\n\n        # Expected andersson et al. schema:\n        input_schema = t.StructType.fromJson(\n            json.loads(\n                pkg_resources.read_text(schemas, \"andersson2014.json\", encoding=\"utf-8\")\n            )\n        )\n\n        # Read the anderson file:\n        parsed_anderson_df = (\n            session.spark.read.option(\"delimiter\", \"\\t\")\n            .option(\"header\", \"true\")\n            .schema(input_schema)\n            .csv(path)\n            # Parsing score column and casting as float:\n            .withColumn(\"score\", f.col(\"score\").cast(\"float\") / f.lit(1000))\n            # Parsing the 'name' column:\n            .withColumn(\"parsedName\", f.split(f.col(\"name\"), \";\"))\n            .withColumn(\"gene_symbol\", f.col(\"parsedName\")[2])\n            .withColumn(\"location\", f.col(\"parsedName\")[0])\n            .withColumn(\n                \"chrom\",\n                f.regexp_replace(f.split(f.col(\"location\"), \":|-\")[0], \"chr\", \"\"),\n            )\n            .withColumn(\n                \"start\", f.split(f.col(\"location\"), \":|-\")[1].cast(t.IntegerType())\n            )\n            .withColumn(\n                \"end\", f.split(f.col(\"location\"), \":|-\")[2].cast(t.IntegerType())\n            )\n            # Select relevant columns:\n            .select(\"chrom\", \"start\", \"end\", \"gene_symbol\", \"score\")\n            # Drop rows with non-canonical chromosomes:\n            .filter(\n                f.col(\"chrom\").isin([str(x) for x in range(1, 23)] + [\"X\", \"Y\", \"MT\"])\n            )\n            # For each region/gene, keep only one row with the highest score:\n            .groupBy(\"chrom\", \"start\", \"end\", \"gene_symbol\")\n            .agg(f.max(\"score\").alias(\"resourceScore\"))\n            .orderBy(\"chrom\", \"start\")\n        )\n\n        return cls(\n            _df=(\n                # Lift over the 
intervals:\n                lift.convert_intervals(parsed_anderson_df, \"chrom\", \"start\", \"end\")\n                .drop(\"start\", \"end\")\n                .withColumnRenamed(\"mapped_start\", \"start\")\n                .withColumnRenamed(\"mapped_end\", \"end\")\n                .distinct()\n                # Joining with the gene index\n                .alias(\"intervals\")\n                .join(\n                    gene_index.symbols_lut().alias(\"genes\"),\n                    on=[f.col(\"intervals.gene_symbol\") == f.col(\"genes.geneSymbol\")],\n                    how=\"left\",\n                )\n                .filter(\n                    # Drop rows where the gene is not on the same chromosome\n                    (f.col(\"chrom\") == f.col(\"chromosome\"))\n                    # Drop rows where the TSS is far from the start of the region\n                    & (\n                        f.abs((f.col(\"start\") + f.col(\"end\")) / 2 - f.col(\"tss\"))\n                        <= twosided_threshold\n                    )\n                )\n                # Select relevant columns:\n                .select(\n                    \"chromosome\",\n                    \"start\",\n                    \"end\",\n                    \"geneId\",\n                    \"resourceScore\",\n                    f.lit(dataset_name).alias(\"datasourceId\"),\n                    f.lit(experiment_type).alias(\"datatypeId\"),\n                    f.lit(pmid).alias(\"pmid\"),\n                    f.lit(bio_feature).alias(\"biofeature\"),\n                )\n            )\n        )\n\n    @classmethod\n    def parse_javierre(\n        cls: type[Intervals],\n        session: Session,\n        path: str,\n        gene_index: GeneIndex,\n        lift: LiftOverSpark,\n    ) -> Intervals:\n\"\"\"Parse Javierre et al. 2016 dataset.\n\n        Args:\n            session (Session): session\n            path (str): Path to dataset\n            gene_index (GeneIndex): Gene index\n            lift (LiftOverSpark): LiftOverSpark instance\n\n        Returns:\n            Intervals: Javierre et al. 
2016 interval data\n        \"\"\"\n        # Constant values:\n        dataset_name = \"javierre2016\"\n        experiment_type = \"pchic\"\n        pmid = \"27863249\"\n        twosided_threshold = 2.45e6\n\n        session.logger.info(\"Parsing Javierre 2016 data...\")\n        session.logger.info(f\"Reading data from {path}\")\n\n        # Read Javierre data:\n        javierre_raw = (\n            session.spark.read.parquet(path)\n            # Splitting name column into chromosome, start, end, and score:\n            .withColumn(\"name_split\", f.split(f.col(\"name\"), r\":|-|,\"))\n            .withColumn(\n                \"name_chr\",\n                f.regexp_replace(f.col(\"name_split\")[0], \"chr\", \"\").cast(\n                    t.StringType()\n                ),\n            )\n            .withColumn(\"name_start\", f.col(\"name_split\")[1].cast(t.IntegerType()))\n            .withColumn(\"name_end\", f.col(\"name_split\")[2].cast(t.IntegerType()))\n            .withColumn(\"name_score\", f.col(\"name_split\")[3].cast(t.FloatType()))\n            # Cleaning up chromosome:\n            .withColumn(\n                \"chrom\",\n                f.regexp_replace(f.col(\"chrom\"), \"chr\", \"\").cast(t.StringType()),\n            )\n            .drop(\"name_split\", \"name\", \"annotation\")\n            # Keep canonical chromosomes and consistent chromosomes with scores:\n            .filter(\n                (f.col(\"name_score\").isNotNull())\n                & (f.col(\"chrom\") == f.col(\"name_chr\"))\n                & f.col(\"name_chr\").isin(\n                    [f\"{x}\" for x in range(1, 23)] + [\"X\", \"Y\", \"MT\"]\n                )\n            )\n        )\n\n        # Lifting over intervals:\n        javierre_remapped = (\n            javierre_raw\n            # Lifting over to GRCh38 interval 1:\n            .transform(lambda df: lift.convert_intervals(df, \"chrom\", \"start\", \"end\"))\n            .drop(\"start\", \"end\")\n            .withColumnRenamed(\"mapped_chrom\", \"chrom\")\n            .withColumnRenamed(\"mapped_start\", \"start\")\n            .withColumnRenamed(\"mapped_end\", \"end\")\n            # Lifting over interval 2 to GRCh38:\n            .transform(\n                lambda df: lift.convert_intervals(\n                    df, \"name_chr\", \"name_start\", \"name_end\"\n                )\n            )\n            .drop(\"name_start\", \"name_end\")\n            .withColumnRenamed(\"mapped_name_chr\", \"name_chr\")\n            .withColumnRenamed(\"mapped_name_start\", \"name_start\")\n            .withColumnRenamed(\"mapped_name_end\", \"name_end\")\n        )\n\n        # Once the intervals are lifted, extracting the unique intervals:\n        unique_intervals_with_genes = (\n            javierre_remapped.alias(\"intervals\")\n            .select(\n                f.col(\"chrom\"),\n                f.col(\"start\").cast(t.IntegerType()),\n                f.col(\"end\").cast(t.IntegerType()),\n            )\n            .distinct()\n            .join(\n                gene_index.locations_lut().alias(\"genes\"),\n                on=[f.col(\"intervals.chrom\") == f.col(\"genes.chromosome\")],\n                how=\"left\",\n            )\n            # TODO: add filter as part of the join condition\n            .filter(\n                (\n                    (f.col(\"start\") >= f.col(\"genomicLocation.start\"))\n                    & (f.col(\"start\") <= f.col(\"genomicLocation.end\"))\n                )\n                | (\n          
          (f.col(\"end\") >= f.col(\"genomicLocation.start\"))\n                    & (f.col(\"end\") <= f.col(\"genomicLocation.end\"))\n                )\n            )\n            .select(\"chrom\", \"start\", \"end\", \"geneId\", \"tss\")\n        )\n\n        # Joining back the data:\n        return cls(\n            _df=(\n                javierre_remapped.join(\n                    unique_intervals_with_genes,\n                    on=[\"chrom\", \"start\", \"end\"],\n                    how=\"left\",\n                )\n                .filter(\n                    # Drop rows where the TSS is far from the start of the region\n                    f.abs((f.col(\"start\") + f.col(\"end\")) / 2 - f.col(\"tss\"))\n                    <= twosided_threshold\n                )\n                # For each gene, keep only the highest scoring interval:\n                .groupBy(\n                    \"name_chr\", \"name_start\", \"name_end\", \"genes.geneId\", \"bio_feature\"\n                )\n                .agg(f.max(f.col(\"name_score\")).alias(\"resourceScore\"))\n                # Create the output:\n                .select(\n                    f.col(\"name_chr\").alias(\"chromosome\"),\n                    f.col(\"name_start\").alias(\"start\"),\n                    f.col(\"name_end\").alias(\"end\"),\n                    f.col(\"resourceScore\"),\n                    f.col(\"genes.geneId\").alias(\"geneId\"),\n                    f.col(\"bio_feature\").alias(\"biofeature\"),\n                    f.lit(dataset_name).alias(\"datasourceId\"),\n                    f.lit(experiment_type).alias(\"datatypeId\"),\n                    f.lit(pmid).alias(\"pmid\"),\n                )\n            )\n        )\n\n    @classmethod\n    def parse_jung(\n        cls: type[Intervals],\n        session: Session,\n        path: str,\n        gene_index: GeneIndex,\n        lift: LiftOverSpark,\n    ) -> Intervals:\n\"\"\"Parse the Jung et al. 2019 dataset.\n\n        Args:\n            session (Session): session\n            path (str): path to the Jung et al. 
2019 dataset\n            gene_index (GeneIndex): gene index\n            lift (LiftOverSpark): LiftOverSpark instance\n\n        Returns:\n            Intervals: _description_\n        \"\"\"\n        dataset_name = \"javierre2016\"\n        experiment_type = \"pchic\"\n        pmid = \"27863249\"\n\n        session.logger.info(\"Parsing Jung 2019 data...\")\n        session.logger.info(f\"Reading data from {path}\")\n\n        # Read Jung data:\n        jung_raw = (\n            session.spark.read.csv(path, sep=\",\", header=True)\n            .withColumn(\"interval\", f.split(f.col(\"Interacting_fragment\"), r\"\\.\"))\n            .select(\n                # Parsing intervals:\n                f.regexp_replace(f.col(\"interval\")[0], \"chr\", \"\").alias(\"chrom\"),\n                f.col(\"interval\")[1].cast(t.IntegerType()).alias(\"start\"),\n                f.col(\"interval\")[2].cast(t.IntegerType()).alias(\"end\"),\n                # Extract other columns:\n                f.col(\"Promoter\").alias(\"gene_name\"),\n                f.col(\"Tissue_type\").alias(\"tissue\"),\n            )\n        )\n\n        # Lifting over the coordinates:\n        return cls(\n            _df=(\n                jung_raw\n                # Lifting over to GRCh38 interval 1:\n                .transform(\n                    lambda df: lift.convert_intervals(df, \"chrom\", \"start\", \"end\")\n                )\n                .select(\n                    \"chrom\",\n                    f.col(\"mapped_start\").alias(\"start\"),\n                    f.col(\"mapped_end\").alias(\"end\"),\n                    f.explode(f.split(f.col(\"gene_name\"), \";\")).alias(\"gene_name\"),\n                    \"tissue\",\n                )\n                .alias(\"intervals\")\n                # Joining with genes:\n                .join(\n                    gene_index.symbols_lut().alias(\"genes\"),\n                    on=[f.col(\"intervals.gene_name\") == f.col(\"genes.geneSymbol\")],\n                    how=\"inner\",\n                )\n                # Finalize dataset:\n                .select(\n                    \"chromosome\",\n                    \"start\",\n                    \"end\",\n                    \"geneId\",\n                    f.col(\"tissue\").alias(\"biofeature\"),\n                    f.lit(1.0).alias(\"score\"),\n                    f.lit(dataset_name).alias(\"datasourceId\"),\n                    f.lit(experiment_type).alias(\"datatypeId\"),\n                    f.lit(pmid).alias(\"pmid\"),\n                )\n                .drop_duplicates()\n            )\n        )\n\n    @classmethod\n    def parse_thurman(\n        cls: type[Intervals],\n        session: Session,\n        path: str,\n        gene_index: GeneIndex,\n        lift: LiftOverSpark,\n    ) -> Intervals:\n\"\"\"Parse the Thurman et al. 2019 dataset.\n\n        Args:\n            session (Session): session\n            path (str): path to the Thurman et al. 
2019 dataset\n            gene_index (GeneIndex): gene index\n            lift (LiftOverSpark): LiftOverSpark instance\n\n        Returns:\n            Intervals: _description_\n        \"\"\"\n        dataset_name = \"thurman2012\"\n        experiment_type = \"dhscor\"\n        pmid = \"22955617\"\n\n        session.logger.info(\"Parsing Jung 2019 data...\")\n        session.logger.info(f\"Reading data from {path}\")\n\n        # Read Jung data:\n        jung_raw = (\n            session.spark.read.csv(path, sep=\",\", header=True)\n            .withColumn(\"interval\", f.split(f.col(\"Interacting_fragment\"), r\"\\.\"))\n            .select(\n                # Parsing intervals:\n                f.regexp_replace(f.col(\"interval\")[0], \"chr\", \"\").alias(\"chrom\"),\n                f.col(\"interval\")[1].cast(t.IntegerType()).alias(\"start\"),\n                f.col(\"interval\")[2].cast(t.IntegerType()).alias(\"end\"),\n                # Extract other columns:\n                f.col(\"Promoter\").alias(\"gene_name\"),\n                f.col(\"Tissue_type\").alias(\"tissue\"),\n            )\n        )\n\n        return cls(\n            _df=(\n                jung_raw\n                # Lifting over to GRCh38 interval 1:\n                .transform(\n                    lambda df: lift.convert_intervals(df, \"chrom\", \"start\", \"end\")\n                )\n                .select(\n                    \"chrom\",\n                    f.col(\"mapped_start\").alias(\"start\"),\n                    f.col(\"mapped_end\").alias(\"end\"),\n                    f.explode(f.split(f.col(\"gene_name\"), \";\")).alias(\"gene_name\"),\n                    \"tissue\",\n                )\n                .alias(\"intervals\")\n                # Joining with genes:\n                .join(\n                    gene_index.symbols_lut().alias(\"genes\"),\n                    on=[f.col(\"intervals.gene_name\") == f.col(\"genes.geneSymbol\")],\n                    how=\"inner\",\n                )\n                # Finalize dataset:\n                .select(\n                    \"chromosome\",\n                    \"start\",\n                    \"end\",\n                    \"geneId\",\n                    f.col(\"tissue\").alias(\"biofeature\"),\n                    f.lit(1.0).alias(\"score\"),\n                    f.lit(dataset_name).alias(\"datasourceId\"),\n                    f.lit(experiment_type).alias(\"datatypeId\"),\n                    f.lit(pmid).alias(\"pmid\"),\n                )\n                .drop_duplicates()\n            )\n        )\n\n    def v2g(self: Intervals, variant_index: VariantIndex) -> V2G:\n\"\"\"Convert intervals into V2G by intersecting with a variant index.\n\n        Args:\n            variant_index (VariantIndex): Variant index dataset\n\n        Returns:\n            V2G: Variant-to-gene evidence dataset\n        \"\"\"\n        return V2G(\n            _df=(\n                # TODO: We can include the start and end position as part of the `on` clause in the join\n                self.df.alias(\"interval\")\n                .join(\n                    variant_index.df.selectExpr(\n                        \"chromosome as vi_chromosome\", \"variantId\", \"position\"\n                    ).alias(\"vi\"),\n                    on=[\n                        f.col(\"vi.vi_chromosome\") == f.col(\"interval.chromosome\"),\n                        f.col(\"vi.position\").between(\n                            f.col(\"interval.start\"), f.col(\"interval.end\")\n               
         ),\n                    ],\n                    how=\"inner\",\n                )\n                .drop(\"start\", \"end\", \"vi_chromosome\")\n            )\n        )\n
"},{"location":"components/dataset/intervals/#otg.dataset.intervals.Intervals.from_parquet","title":"from_parquet(session, path) classmethod","text":"

Initialise Intervals from parquet file.

Parameters:

Name     Type     Description           Default
session  Session  ETL session           required
path     str      Path to parquet file  required

Returns:

Name       Type       Description
Intervals  Intervals  Intervals dataset

Source code in src/otg/dataset/intervals.py
@classmethod\ndef from_parquet(cls: type[Intervals], session: Session, path: str) -> Intervals:\n\"\"\"Initialise Intervals from parquet file.\n\n    Args:\n        session (Session): ETL session\n        path (str): Path to parquet file\n\n    Returns:\n        Intervals: Intervals dataset\n    \"\"\"\n    df = session.read_parquet(path=path, schema=cls._schema)\n    return cls(_df=df, _schema=cls._schema)\n
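The method above is the standard way to re-hydrate an Intervals dataset from disk. A minimal usage sketch, assuming a Session object can be constructed with defaults; the Session import path and the parquet location below are hypothetical:

from otg.dataset.intervals import Intervals
from otg.common.session import Session  # assumed module path

session = Session()  # hypothetical: build an ETL session with default settings
# hypothetical location of a previously written Intervals parquet dataset:
intervals = Intervals.from_parquet(session, "gs://my-bucket/intervals/")
intervals.df.printSchema()  # conforms to the schema shown in the Schema section below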
"},{"location":"components/dataset/intervals/#otg.dataset.intervals.Intervals.parse_andersson","title":"parse_andersson(session, path, gene_index, lift) classmethod","text":"

Parse Andersson et al. 2014 dataset.

Parameters:

Name        Type           Description             Default
session     Session        session                 required
path        str            Path to dataset         required
gene_index  GeneIndex      Gene index              required
lift        LiftOverSpark  LiftOverSpark instance  required

Returns:

Name       Type       Description
Intervals  Intervals  Intervals dataset

Source code in src/otg/dataset/intervals.py
@classmethod\ndef parse_andersson(\n    cls: type[Intervals],\n    session: Session,\n    path: str,\n    gene_index: GeneIndex,\n    lift: LiftOverSpark,\n) -> Intervals:\n\"\"\"Parse Andersson et al. 2014 dataset.\n\n    Args:\n        session (Session): session\n        path (str): Path to dataset\n        gene_index (GeneIndex): Gene index\n        lift (LiftOverSpark): LiftOverSpark instance\n\n    Returns:\n        Intervals: Intervals dataset\n    \"\"\"\n    # Constant values:\n    dataset_name = \"andersson2014\"\n    experiment_type = \"fantom5\"\n    pmid = \"24670763\"\n    bio_feature = \"aggregate\"\n    twosided_threshold = 2.45e6  # <-  this needs to phased out. Filter by percentile instead of absolute value.\n\n    session.logger.info(\"Parsing Andersson 2014 data...\")\n    session.logger.info(f\"Reading data from {path}\")\n\n    # Expected andersson et al. schema:\n    input_schema = t.StructType.fromJson(\n        json.loads(\n            pkg_resources.read_text(schemas, \"andersson2014.json\", encoding=\"utf-8\")\n        )\n    )\n\n    # Read the anderson file:\n    parsed_anderson_df = (\n        session.spark.read.option(\"delimiter\", \"\\t\")\n        .option(\"header\", \"true\")\n        .schema(input_schema)\n        .csv(path)\n        # Parsing score column and casting as float:\n        .withColumn(\"score\", f.col(\"score\").cast(\"float\") / f.lit(1000))\n        # Parsing the 'name' column:\n        .withColumn(\"parsedName\", f.split(f.col(\"name\"), \";\"))\n        .withColumn(\"gene_symbol\", f.col(\"parsedName\")[2])\n        .withColumn(\"location\", f.col(\"parsedName\")[0])\n        .withColumn(\n            \"chrom\",\n            f.regexp_replace(f.split(f.col(\"location\"), \":|-\")[0], \"chr\", \"\"),\n        )\n        .withColumn(\n            \"start\", f.split(f.col(\"location\"), \":|-\")[1].cast(t.IntegerType())\n        )\n        .withColumn(\n            \"end\", f.split(f.col(\"location\"), \":|-\")[2].cast(t.IntegerType())\n        )\n        # Select relevant columns:\n        .select(\"chrom\", \"start\", \"end\", \"gene_symbol\", \"score\")\n        # Drop rows with non-canonical chromosomes:\n        .filter(\n            f.col(\"chrom\").isin([str(x) for x in range(1, 23)] + [\"X\", \"Y\", \"MT\"])\n        )\n        # For each region/gene, keep only one row with the highest score:\n        .groupBy(\"chrom\", \"start\", \"end\", \"gene_symbol\")\n        .agg(f.max(\"score\").alias(\"resourceScore\"))\n        .orderBy(\"chrom\", \"start\")\n    )\n\n    return cls(\n        _df=(\n            # Lift over the intervals:\n            lift.convert_intervals(parsed_anderson_df, \"chrom\", \"start\", \"end\")\n            .drop(\"start\", \"end\")\n            .withColumnRenamed(\"mapped_start\", \"start\")\n            .withColumnRenamed(\"mapped_end\", \"end\")\n            .distinct()\n            # Joining with the gene index\n            .alias(\"intervals\")\n            .join(\n                gene_index.symbols_lut().alias(\"genes\"),\n                on=[f.col(\"intervals.gene_symbol\") == f.col(\"genes.geneSymbol\")],\n                how=\"left\",\n            )\n            .filter(\n                # Drop rows where the gene is not on the same chromosome\n                (f.col(\"chrom\") == f.col(\"chromosome\"))\n                # Drop rows where the TSS is far from the start of the region\n                & (\n                    f.abs((f.col(\"start\") + f.col(\"end\")) / 2 - f.col(\"tss\"))\n                  
  <= twosided_threshold\n                )\n            )\n            # Select relevant columns:\n            .select(\n                \"chromosome\",\n                \"start\",\n                \"end\",\n                \"geneId\",\n                \"resourceScore\",\n                f.lit(dataset_name).alias(\"datasourceId\"),\n                f.lit(experiment_type).alias(\"datatypeId\"),\n                f.lit(pmid).alias(\"pmid\"),\n                f.lit(bio_feature).alias(\"biofeature\"),\n            )\n        )\n    )\n
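To make the name-column parsing above concrete, here is a standalone sketch on a single toy row; the layout "chr:start-end;<ignored>;GENE" is inferred from the indexing used in the code, and all values are made up:

from pyspark.sql import SparkSession, functions as f, types as t

spark = SparkSession.builder.getOrCreate()
df = spark.createDataFrame([("chr1:100-200;x;GENE1", 500)], ["name", "score"])
parsed = (
    df.withColumn("score", f.col("score").cast("float") / f.lit(1000))
    .withColumn("parsedName", f.split(f.col("name"), ";"))
    .withColumn("gene_symbol", f.col("parsedName")[2])
    .withColumn("location", f.col("parsedName")[0])
    .withColumn("chrom", f.regexp_replace(f.split(f.col("location"), ":|-")[0], "chr", ""))
    .withColumn("start", f.split(f.col("location"), ":|-")[1].cast(t.IntegerType()))
    .withColumn("end", f.split(f.col("location"), ":|-")[2].cast(t.IntegerType()))
)
parsed.select("chrom", "start", "end", "gene_symbol", "score").show()
# Yields a single row: chrom "1", start 100, end 200, gene_symbol "GENE1", score 0.5.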
"},{"location":"components/dataset/intervals/#otg.dataset.intervals.Intervals.parse_javierre","title":"parse_javierre(session, path, gene_index, lift) classmethod","text":"

Parse Javierre et al. 2016 dataset.

Parameters:

Name        Type           Description             Default
session     Session        session                 required
path        str            Path to dataset         required
gene_index  GeneIndex      Gene index              required
lift        LiftOverSpark  LiftOverSpark instance  required

Returns:

Name       Type       Description
Intervals  Intervals  Javierre et al. 2016 interval data

Source code in src/otg/dataset/intervals.py
@classmethod\ndef parse_javierre(\n    cls: type[Intervals],\n    session: Session,\n    path: str,\n    gene_index: GeneIndex,\n    lift: LiftOverSpark,\n) -> Intervals:\n\"\"\"Parse Javierre et al. 2016 dataset.\n\n    Args:\n        session (Session): session\n        path (str): Path to dataset\n        gene_index (GeneIndex): Gene index\n        lift (LiftOverSpark): LiftOverSpark instance\n\n    Returns:\n        Intervals: Javierre et al. 2016 interval data\n    \"\"\"\n    # Constant values:\n    dataset_name = \"javierre2016\"\n    experiment_type = \"pchic\"\n    pmid = \"27863249\"\n    twosided_threshold = 2.45e6\n\n    session.logger.info(\"Parsing Javierre 2016 data...\")\n    session.logger.info(f\"Reading data from {path}\")\n\n    # Read Javierre data:\n    javierre_raw = (\n        session.spark.read.parquet(path)\n        # Splitting name column into chromosome, start, end, and score:\n        .withColumn(\"name_split\", f.split(f.col(\"name\"), r\":|-|,\"))\n        .withColumn(\n            \"name_chr\",\n            f.regexp_replace(f.col(\"name_split\")[0], \"chr\", \"\").cast(\n                t.StringType()\n            ),\n        )\n        .withColumn(\"name_start\", f.col(\"name_split\")[1].cast(t.IntegerType()))\n        .withColumn(\"name_end\", f.col(\"name_split\")[2].cast(t.IntegerType()))\n        .withColumn(\"name_score\", f.col(\"name_split\")[3].cast(t.FloatType()))\n        # Cleaning up chromosome:\n        .withColumn(\n            \"chrom\",\n            f.regexp_replace(f.col(\"chrom\"), \"chr\", \"\").cast(t.StringType()),\n        )\n        .drop(\"name_split\", \"name\", \"annotation\")\n        # Keep canonical chromosomes and consistent chromosomes with scores:\n        .filter(\n            (f.col(\"name_score\").isNotNull())\n            & (f.col(\"chrom\") == f.col(\"name_chr\"))\n            & f.col(\"name_chr\").isin(\n                [f\"{x}\" for x in range(1, 23)] + [\"X\", \"Y\", \"MT\"]\n            )\n        )\n    )\n\n    # Lifting over intervals:\n    javierre_remapped = (\n        javierre_raw\n        # Lifting over to GRCh38 interval 1:\n        .transform(lambda df: lift.convert_intervals(df, \"chrom\", \"start\", \"end\"))\n        .drop(\"start\", \"end\")\n        .withColumnRenamed(\"mapped_chrom\", \"chrom\")\n        .withColumnRenamed(\"mapped_start\", \"start\")\n        .withColumnRenamed(\"mapped_end\", \"end\")\n        # Lifting over interval 2 to GRCh38:\n        .transform(\n            lambda df: lift.convert_intervals(\n                df, \"name_chr\", \"name_start\", \"name_end\"\n            )\n        )\n        .drop(\"name_start\", \"name_end\")\n        .withColumnRenamed(\"mapped_name_chr\", \"name_chr\")\n        .withColumnRenamed(\"mapped_name_start\", \"name_start\")\n        .withColumnRenamed(\"mapped_name_end\", \"name_end\")\n    )\n\n    # Once the intervals are lifted, extracting the unique intervals:\n    unique_intervals_with_genes = (\n        javierre_remapped.alias(\"intervals\")\n        .select(\n            f.col(\"chrom\"),\n            f.col(\"start\").cast(t.IntegerType()),\n            f.col(\"end\").cast(t.IntegerType()),\n        )\n        .distinct()\n        .join(\n            gene_index.locations_lut().alias(\"genes\"),\n            on=[f.col(\"intervals.chrom\") == f.col(\"genes.chromosome\")],\n            how=\"left\",\n        )\n        # TODO: add filter as part of the join condition\n        .filter(\n            (\n                (f.col(\"start\") >= 
f.col(\"genomicLocation.start\"))\n                & (f.col(\"start\") <= f.col(\"genomicLocation.end\"))\n            )\n            | (\n                (f.col(\"end\") >= f.col(\"genomicLocation.start\"))\n                & (f.col(\"end\") <= f.col(\"genomicLocation.end\"))\n            )\n        )\n        .select(\"chrom\", \"start\", \"end\", \"geneId\", \"tss\")\n    )\n\n    # Joining back the data:\n    return cls(\n        _df=(\n            javierre_remapped.join(\n                unique_intervals_with_genes,\n                on=[\"chrom\", \"start\", \"end\"],\n                how=\"left\",\n            )\n            .filter(\n                # Drop rows where the TSS is far from the start of the region\n                f.abs((f.col(\"start\") + f.col(\"end\")) / 2 - f.col(\"tss\"))\n                <= twosided_threshold\n            )\n            # For each gene, keep only the highest scoring interval:\n            .groupBy(\n                \"name_chr\", \"name_start\", \"name_end\", \"genes.geneId\", \"bio_feature\"\n            )\n            .agg(f.max(f.col(\"name_score\")).alias(\"resourceScore\"))\n            # Create the output:\n            .select(\n                f.col(\"name_chr\").alias(\"chromosome\"),\n                f.col(\"name_start\").alias(\"start\"),\n                f.col(\"name_end\").alias(\"end\"),\n                f.col(\"resourceScore\"),\n                f.col(\"genes.geneId\").alias(\"geneId\"),\n                f.col(\"bio_feature\").alias(\"biofeature\"),\n                f.lit(dataset_name).alias(\"datasourceId\"),\n                f.lit(experiment_type).alias(\"datatypeId\"),\n                f.lit(pmid).alias(\"pmid\"),\n            )\n        )\n    )\n
"},{"location":"components/dataset/intervals/#otg.dataset.intervals.Intervals.parse_jung","title":"parse_jung(session, path, gene_index, lift) classmethod","text":"

Parse the Jung et al. 2019 dataset.

Parameters:

Name        Type           Description                            Default
session     Session        session                                required
path        str            path to the Jung et al. 2019 dataset   required
gene_index  GeneIndex      gene index                             required
lift        LiftOverSpark  LiftOverSpark instance                 required

Returns:

Name       Type       Description
Intervals  Intervals  Jung et al. 2019 interval data

Source code in src/otg/dataset/intervals.py
@classmethod\ndef parse_jung(\n    cls: type[Intervals],\n    session: Session,\n    path: str,\n    gene_index: GeneIndex,\n    lift: LiftOverSpark,\n) -> Intervals:\n\"\"\"Parse the Jung et al. 2019 dataset.\n\n    Args:\n        session (Session): session\n        path (str): path to the Jung et al. 2019 dataset\n        gene_index (GeneIndex): gene index\n        lift (LiftOverSpark): LiftOverSpark instance\n\n    Returns:\n        Intervals: _description_\n    \"\"\"\n    dataset_name = \"javierre2016\"\n    experiment_type = \"pchic\"\n    pmid = \"27863249\"\n\n    session.logger.info(\"Parsing Jung 2019 data...\")\n    session.logger.info(f\"Reading data from {path}\")\n\n    # Read Jung data:\n    jung_raw = (\n        session.spark.read.csv(path, sep=\",\", header=True)\n        .withColumn(\"interval\", f.split(f.col(\"Interacting_fragment\"), r\"\\.\"))\n        .select(\n            # Parsing intervals:\n            f.regexp_replace(f.col(\"interval\")[0], \"chr\", \"\").alias(\"chrom\"),\n            f.col(\"interval\")[1].cast(t.IntegerType()).alias(\"start\"),\n            f.col(\"interval\")[2].cast(t.IntegerType()).alias(\"end\"),\n            # Extract other columns:\n            f.col(\"Promoter\").alias(\"gene_name\"),\n            f.col(\"Tissue_type\").alias(\"tissue\"),\n        )\n    )\n\n    # Lifting over the coordinates:\n    return cls(\n        _df=(\n            jung_raw\n            # Lifting over to GRCh38 interval 1:\n            .transform(\n                lambda df: lift.convert_intervals(df, \"chrom\", \"start\", \"end\")\n            )\n            .select(\n                \"chrom\",\n                f.col(\"mapped_start\").alias(\"start\"),\n                f.col(\"mapped_end\").alias(\"end\"),\n                f.explode(f.split(f.col(\"gene_name\"), \";\")).alias(\"gene_name\"),\n                \"tissue\",\n            )\n            .alias(\"intervals\")\n            # Joining with genes:\n            .join(\n                gene_index.symbols_lut().alias(\"genes\"),\n                on=[f.col(\"intervals.gene_name\") == f.col(\"genes.geneSymbol\")],\n                how=\"inner\",\n            )\n            # Finalize dataset:\n            .select(\n                \"chromosome\",\n                \"start\",\n                \"end\",\n                \"geneId\",\n                f.col(\"tissue\").alias(\"biofeature\"),\n                f.lit(1.0).alias(\"score\"),\n                f.lit(dataset_name).alias(\"datasourceId\"),\n                f.lit(experiment_type).alias(\"datatypeId\"),\n                f.lit(pmid).alias(\"pmid\"),\n            )\n            .drop_duplicates()\n        )\n    )\n
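The Promoter field can list several genes separated by ";"; the explode/split combination above produces one row per gene. A minimal standalone illustration with made-up values:

from pyspark.sql import SparkSession, functions as f

spark = SparkSession.builder.getOrCreate()
df = spark.createDataFrame([("GENE1;GENE2", "Liver")], ["gene_name", "tissue"])
df.select(
    f.explode(f.split(f.col("gene_name"), ";")).alias("gene_name"),
    "tissue",
).show()
# Produces two rows: (GENE1, Liver) and (GENE2, Liver).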
"},{"location":"components/dataset/intervals/#otg.dataset.intervals.Intervals.parse_thurman","title":"parse_thurman(session, path, gene_index, lift) classmethod","text":"

Parse the Thurman et al. 2012 dataset.

Parameters:

Name        Type           Description                               Default
session     Session        session                                   required
path        str            path to the Thurman et al. 2012 dataset   required
gene_index  GeneIndex      gene index                                required
lift        LiftOverSpark  LiftOverSpark instance                    required

Returns:

Name       Type       Description
Intervals  Intervals  Thurman et al. 2012 interval data

Source code in src/otg/dataset/intervals.py
@classmethod\ndef parse_thurman(\n    cls: type[Intervals],\n    session: Session,\n    path: str,\n    gene_index: GeneIndex,\n    lift: LiftOverSpark,\n) -> Intervals:\n\"\"\"Parse the Thurman et al. 2019 dataset.\n\n    Args:\n        session (Session): session\n        path (str): path to the Thurman et al. 2019 dataset\n        gene_index (GeneIndex): gene index\n        lift (LiftOverSpark): LiftOverSpark instance\n\n    Returns:\n        Intervals: _description_\n    \"\"\"\n    dataset_name = \"thurman2012\"\n    experiment_type = \"dhscor\"\n    pmid = \"22955617\"\n\n    session.logger.info(\"Parsing Jung 2019 data...\")\n    session.logger.info(f\"Reading data from {path}\")\n\n    # Read Jung data:\n    jung_raw = (\n        session.spark.read.csv(path, sep=\",\", header=True)\n        .withColumn(\"interval\", f.split(f.col(\"Interacting_fragment\"), r\"\\.\"))\n        .select(\n            # Parsing intervals:\n            f.regexp_replace(f.col(\"interval\")[0], \"chr\", \"\").alias(\"chrom\"),\n            f.col(\"interval\")[1].cast(t.IntegerType()).alias(\"start\"),\n            f.col(\"interval\")[2].cast(t.IntegerType()).alias(\"end\"),\n            # Extract other columns:\n            f.col(\"Promoter\").alias(\"gene_name\"),\n            f.col(\"Tissue_type\").alias(\"tissue\"),\n        )\n    )\n\n    return cls(\n        _df=(\n            jung_raw\n            # Lifting over to GRCh38 interval 1:\n            .transform(\n                lambda df: lift.convert_intervals(df, \"chrom\", \"start\", \"end\")\n            )\n            .select(\n                \"chrom\",\n                f.col(\"mapped_start\").alias(\"start\"),\n                f.col(\"mapped_end\").alias(\"end\"),\n                f.explode(f.split(f.col(\"gene_name\"), \";\")).alias(\"gene_name\"),\n                \"tissue\",\n            )\n            .alias(\"intervals\")\n            # Joining with genes:\n            .join(\n                gene_index.symbols_lut().alias(\"genes\"),\n                on=[f.col(\"intervals.gene_name\") == f.col(\"genes.geneSymbol\")],\n                how=\"inner\",\n            )\n            # Finalize dataset:\n            .select(\n                \"chromosome\",\n                \"start\",\n                \"end\",\n                \"geneId\",\n                f.col(\"tissue\").alias(\"biofeature\"),\n                f.lit(1.0).alias(\"score\"),\n                f.lit(dataset_name).alias(\"datasourceId\"),\n                f.lit(experiment_type).alias(\"datatypeId\"),\n                f.lit(pmid).alias(\"pmid\"),\n            )\n            .drop_duplicates()\n        )\n    )\n
"},{"location":"components/dataset/intervals/#otg.dataset.intervals.Intervals.v2g","title":"v2g(variant_index)","text":"

Convert intervals into V2G by intersecting with a variant index.

Parameters:

Name           Type          Description            Default
variant_index  VariantIndex  Variant index dataset  required

Returns:

Name  Type  Description
V2G   V2G   Variant-to-gene evidence dataset

Source code in src/otg/dataset/intervals.py
def v2g(self: Intervals, variant_index: VariantIndex) -> V2G:\n\"\"\"Convert intervals into V2G by intersecting with a variant index.\n\n    Args:\n        variant_index (VariantIndex): Variant index dataset\n\n    Returns:\n        V2G: Variant-to-gene evidence dataset\n    \"\"\"\n    return V2G(\n        _df=(\n            # TODO: We can include the start and end position as part of the `on` clause in the join\n            self.df.alias(\"interval\")\n            .join(\n                variant_index.df.selectExpr(\n                    \"chromosome as vi_chromosome\", \"variantId\", \"position\"\n                ).alias(\"vi\"),\n                on=[\n                    f.col(\"vi.vi_chromosome\") == f.col(\"interval.chromosome\"),\n                    f.col(\"vi.position\").between(\n                        f.col(\"interval.start\"), f.col(\"interval.end\")\n                    ),\n                ],\n                how=\"inner\",\n            )\n            .drop(\"start\", \"end\", \"vi_chromosome\")\n        )\n    )\n
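The join above is a range join: a variant is attached to every interval on the same chromosome whose [start, end] span contains its position. A toy sketch of the same join condition, without the real datasets:

from pyspark.sql import SparkSession, functions as f

spark = SparkSession.builder.getOrCreate()
intervals = spark.createDataFrame(
    [("1", 100, 200, "ENSG01")],
    ["chromosome", "start", "end", "geneId"],
).alias("interval")
variants = spark.createDataFrame(
    [("1", "1_150_A_G", 150), ("1", "1_250_A_G", 250)],
    ["vi_chromosome", "variantId", "position"],
).alias("vi")
intervals.join(
    variants,
    on=[
        f.col("vi.vi_chromosome") == f.col("interval.chromosome"),
        f.col("vi.position").between(f.col("interval.start"), f.col("interval.end")),
    ],
    how="inner",
).drop("start", "end", "vi_chromosome").show()
# Only variant 1_150_A_G falls inside the interval, so a single row is returned.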
"},{"location":"components/dataset/intervals/#schema","title":"Schema","text":"
root\n |-- chromosome: string (nullable = false)\n |-- start: string (nullable = false)\n |-- end: string (nullable = false)\n |-- geneId: string (nullable = false)\n |-- resourceScore: double (nullable = true)\n |-- score: double (nullable = true)\n |-- datasourceId: string (nullable = false)\n |-- datatypeId: string (nullable = false)\n |-- pmid: string (nullable = true)\n |-- biofeature: string (nullable = true)\n
"},{"location":"components/dataset/ld_index/","title":"LD index","text":"

Bases: Dataset

Dataset to index access to LD information from GnomAD.

Source code in src/otg/dataset/ld_index.py
@dataclass\nclass LDIndex(Dataset):\n\"\"\"Dataset to index access to LD information from GnomAD.\"\"\"\n\n    _schema: StructType = parse_spark_schema(\"ld_index.json\")\n\n    @staticmethod\n    def _liftover_loci(variant_index: Table, grch37_to_grch38_chain_path: str) -> Table:\n\"\"\"Liftover hail table with LD variant index.\n\n        Args:\n            variant_index (Table): LD variant indexes\n            grch37_to_grch38_chain_path (str): Path to chain file\n\n        Returns:\n            Table: LD variant index with locus 38 coordinates\n        \"\"\"\n        if not hl.get_reference(\"GRCh37\").has_liftover(\"GRCh38\"):\n            rg37 = hl.get_reference(\"GRCh37\")\n            rg38 = hl.get_reference(\"GRCh38\")\n            rg37.add_liftover(grch37_to_grch38_chain_path, rg38)\n\n        return variant_index.annotate(\n            locus38=hl.liftover(variant_index.locus, \"GRCh38\")\n        )\n\n    @staticmethod\n    def _interval_start(contig: Column, position: Column, ld_radius: int) -> Column:\n\"\"\"Start position of the interval based on available positions.\n\n        Args:\n            contig (Column): genomic contigs\n            position (Column): genomic positions\n            ld_radius (int): bp around locus\n\n        Returns:\n            Column: Position of the locus starting the interval\n\n        Examples:\n            >>> d = [\n            ...     {\"contig\": \"21\", \"pos\": 100},\n            ...     {\"contig\": \"21\", \"pos\": 200},\n            ...     {\"contig\": \"21\", \"pos\": 300},\n            ... ]\n            >>> df = spark.createDataFrame(d)\n            >>> df.withColumn(\"start\", LDIndex._interval_start(f.col(\"contig\"), f.col(\"pos\"), 100)).show()\n            +------+---+-----+\n            |contig|pos|start|\n            +------+---+-----+\n            |    21|100|  100|\n            |    21|200|  100|\n            |    21|300|  200|\n            +------+---+-----+\n            <BLANKLINE>\n\n        \"\"\"\n        w = (\n            Window.partitionBy(contig)\n            .orderBy(position)\n            .rangeBetween(-ld_radius, ld_radius)\n        )\n        return f.min(position).over(w)\n\n    @staticmethod\n    def _interval_stop(contig: Column, position: Column, ld_radius: int) -> Column:\n\"\"\"Stop position of the interval based on available positions.\n\n        Args:\n            contig (Column): genomic contigs\n            position (Column): genomic positions\n            ld_radius (int): bp around locus\n\n        Returns:\n            Column: Position of the locus at the end of the interval\n\n        Examples:\n            >>> d = [\n            ...     {\"contig\": \"21\", \"pos\": 100},\n            ...     {\"contig\": \"21\", \"pos\": 200},\n            ...     {\"contig\": \"21\", \"pos\": 300},\n            ... 
]\n            >>> df = spark.createDataFrame(d)\n            >>> df.withColumn(\"start\", LDIndex._interval_stop(f.col(\"contig\"), f.col(\"pos\"), 100)).show()\n            +------+---+-----+\n            |contig|pos|start|\n            +------+---+-----+\n            |    21|100|  200|\n            |    21|200|  300|\n            |    21|300|  300|\n            +------+---+-----+\n            <BLANKLINE>\n\n        \"\"\"\n        w = (\n            Window.partitionBy(contig)\n            .orderBy(position)\n            .rangeBetween(-ld_radius, ld_radius)\n        )\n        return f.max(position).over(w)\n\n    @classmethod\n    def from_parquet(cls: type[LDIndex], session: Session, path: str) -> LDIndex:\n\"\"\"Initialise LD index from parquet file.\n\n        Args:\n            session (Session): ETL session\n            path (str): Path to parquet file\n\n        Returns:\n            LDIndex: LD index dataset\n        \"\"\"\n        df = session.read_parquet(path=path, schema=cls._schema)\n        return cls(_df=df, _schema=cls._schema)\n\n    @classmethod\n    def create(\n        cls: type[LDIndex],\n        pop_ldindex_path: str,\n        ld_radius: int,\n        grch37_to_grch38_chain_path: str,\n    ) -> LDIndex:\n\"\"\"Parse LD index and annotate with interval start and stop.\n\n        Args:\n            pop_ldindex_path (str): path to gnomAD LD index\n            ld_radius (int): radius\n            grch37_to_grch38_chain_path (str): path to chain file for liftover\n\n        Returns:\n            LDIndex: Created GnomAD LD index\n        \"\"\"\n        ld_index = hl.read_table(pop_ldindex_path).naive_coalesce(400)\n        ld_index_38 = LDIndex._liftover_loci(ld_index, grch37_to_grch38_chain_path)\n\n        return cls(\n            _df=ld_index_38.to_spark()\n            .filter(f.col(\"`locus38.position`\").isNotNull())\n            .select(\n                f.coalesce(f.col(\"idx\"), f.monotonically_increasing_id()).alias(\"idx\"),\n                f.coalesce(\n                    f.regexp_replace(\"`locus38.contig`\", \"chr\", \"\"), f.lit(\"unknown\")\n                ).alias(\"chromosome\"),\n                f.coalesce(f.col(\"`locus38.position`\"), f.lit(-1)).alias(\"position\"),\n                f.coalesce(f.col(\"`alleles`\").getItem(0), f.lit(\"?\")).alias(\n                    \"referenceAllele\"\n                ),\n                f.coalesce(f.col(\"`alleles`\").getItem(1), f.lit(\"?\")).alias(\n                    \"alternateAllele\"\n                ),\n            )\n            # Convert gnomad position to Ensembl position (1-based for indels)\n            .withColumn(\n                \"position\",\n                convert_gnomad_position_to_ensembl(\n                    f.col(\"position\"),\n                    f.col(\"referenceAllele\"),\n                    f.col(\"alternateAllele\"),\n                ),\n            )\n            .withColumn(\n                \"variantId\",\n                f.concat_ws(\n                    \"_\",\n                    f.col(\"chromosome\"),\n                    f.col(\"position\"),\n                    f.col(\"referenceAllele\"),\n                    f.col(\"alternateAllele\"),\n                ),\n            )\n            # Filter out variants mapping to several indices due to liftover\n            .withColumn(\"count\", f.count(\"*\").over(Window.partitionBy([\"variantId\"])))\n            .filter(f.col(\"count\") == 1)\n            .drop(\"count\")\n            .withColumn(\"start_idx\", 
f.lit(None).cast(t.LongType()))\n            .withColumn(\"stop_idx\", f.lit(None).cast(t.LongType()))\n            .repartition(400, \"chromosome\")\n            .sortWithinPartitions(\"position\")\n            .persist()\n        ).annotate_index_intervals(ld_radius)\n\n    def annotate_index_intervals(self: LDIndex, ld_radius: int) -> LDIndex:\n\"\"\"Annotate LD index with indices starting and stopping at a given interval.\n\n        Args:\n            ld_radius (int): radius around each position\n\n        Returns:\n            LDIndex: including `start_idx` and `stop_idx` columns\n        \"\"\"\n        index_with_positions = (\n            self._df.drop(\"start_idx\", \"stop_idx\")\n            .select(\n                \"*\",\n                LDIndex._interval_start(\n                    contig=f.col(\"chromosome\"),\n                    position=f.col(\"position\"),\n                    ld_radius=ld_radius,\n                ).alias(\"start_pos\"),\n                LDIndex._interval_stop(\n                    contig=f.col(\"chromosome\"),\n                    position=f.col(\"position\"),\n                    ld_radius=ld_radius,\n                ).alias(\"stop_pos\"),\n            )\n            .persist()\n        )\n\n        self.df = (\n            index_with_positions.join(\n                (\n                    index_with_positions\n                    # Given the multiple variants with the same chromosome/position can have different indices, filter for the lowest index:\n                    .transform(\n                        lambda df: get_record_with_minimum_value(\n                            df, [\"chromosome\", \"position\"], \"idx\"\n                        )\n                    ).select(\n                        \"chromosome\",\n                        f.col(\"position\").alias(\"start_pos\"),\n                        f.col(\"idx\").alias(\"start_idx\"),\n                    )\n                ),\n                on=[\"chromosome\", \"start_pos\"],\n            )\n            .join(\n                (\n                    index_with_positions\n                    # Given the multiple variants with the same chromosome/position can have different indices, filter for the highest index:\n                    .transform(\n                        lambda df: get_record_with_maximum_value(\n                            df, [\"chromosome\", \"position\"], \"idx\"\n                        )\n                    ).select(\n                        \"chromosome\",\n                        f.col(\"position\").alias(\"stop_pos\"),\n                        f.col(\"idx\").alias(\"stop_idx\"),\n                    )\n                ),\n                on=[\"chromosome\", \"stop_pos\"],\n            )\n            # Filter out variants for which start idx > stop idx due to liftover\n            .filter(f.col(\"start_idx\") < f.col(\"stop_idx\"))\n            .drop(\"start_pos\", \"stop_pos\")\n        )\n\n        return self\n
"},{"location":"components/dataset/ld_index/#otg.dataset.ld_index.LDIndex.annotate_index_intervals","title":"annotate_index_intervals(ld_radius)","text":"

Annotate LD index with indices starting and stopping at a given interval.

Parameters:

Name       Type  Description                  Default
ld_radius  int   radius around each position  required

Returns:

Name     Type     Description
LDIndex  LDIndex  including start_idx and stop_idx columns

Source code in src/otg/dataset/ld_index.py
def annotate_index_intervals(self: LDIndex, ld_radius: int) -> LDIndex:\n\"\"\"Annotate LD index with indices starting and stopping at a given interval.\n\n    Args:\n        ld_radius (int): radius around each position\n\n    Returns:\n        LDIndex: including `start_idx` and `stop_idx` columns\n    \"\"\"\n    index_with_positions = (\n        self._df.drop(\"start_idx\", \"stop_idx\")\n        .select(\n            \"*\",\n            LDIndex._interval_start(\n                contig=f.col(\"chromosome\"),\n                position=f.col(\"position\"),\n                ld_radius=ld_radius,\n            ).alias(\"start_pos\"),\n            LDIndex._interval_stop(\n                contig=f.col(\"chromosome\"),\n                position=f.col(\"position\"),\n                ld_radius=ld_radius,\n            ).alias(\"stop_pos\"),\n        )\n        .persist()\n    )\n\n    self.df = (\n        index_with_positions.join(\n            (\n                index_with_positions\n                # Given the multiple variants with the same chromosome/position can have different indices, filter for the lowest index:\n                .transform(\n                    lambda df: get_record_with_minimum_value(\n                        df, [\"chromosome\", \"position\"], \"idx\"\n                    )\n                ).select(\n                    \"chromosome\",\n                    f.col(\"position\").alias(\"start_pos\"),\n                    f.col(\"idx\").alias(\"start_idx\"),\n                )\n            ),\n            on=[\"chromosome\", \"start_pos\"],\n        )\n        .join(\n            (\n                index_with_positions\n                # Given the multiple variants with the same chromosome/position can have different indices, filter for the highest index:\n                .transform(\n                    lambda df: get_record_with_maximum_value(\n                        df, [\"chromosome\", \"position\"], \"idx\"\n                    )\n                ).select(\n                    \"chromosome\",\n                    f.col(\"position\").alias(\"stop_pos\"),\n                    f.col(\"idx\").alias(\"stop_idx\"),\n                )\n            ),\n            on=[\"chromosome\", \"stop_pos\"],\n        )\n        # Filter out variants for which start idx > stop idx due to liftover\n        .filter(f.col(\"start_idx\") < f.col(\"stop_idx\"))\n        .drop(\"start_pos\", \"stop_pos\")\n    )\n\n    return self\n
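The helpers get_record_with_minimum_value and get_record_with_maximum_value are not documented on this page; under the assumption that they keep, per group, the row with the smallest (respectively largest) value, the first of the two joins could be approximated with a plain window, as in this sketch:

from pyspark.sql import SparkSession, Window, functions as f

spark = SparkSession.builder.getOrCreate()
df = spark.createDataFrame(
    [("21", 100, 5), ("21", 100, 7), ("21", 200, 9)],
    ["chromosome", "position", "idx"],
)
w = Window.partitionBy("chromosome", "position").orderBy(f.col("idx").asc())
min_idx_per_position = (
    df.withColumn("rank", f.row_number().over(w))
    .filter(f.col("rank") == 1)
    .drop("rank")
)
min_idx_per_position.show()
# Keeps idx=5 for position 100 and idx=9 for position 200.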
"},{"location":"components/dataset/ld_index/#otg.dataset.ld_index.LDIndex.create","title":"create(pop_ldindex_path, ld_radius, grch37_to_grch38_chain_path) classmethod","text":"

Parse LD index and annotate with interval start and stop.

Parameters:

Name                         Type  Description                      Default
pop_ldindex_path             str   path to gnomAD LD index          required
ld_radius                    int   radius                           required
grch37_to_grch38_chain_path  str   path to chain file for liftover  required

Returns:

Name     Type     Description
LDIndex  LDIndex  Created GnomAD LD index

Source code in src/otg/dataset/ld_index.py
@classmethod\ndef create(\n    cls: type[LDIndex],\n    pop_ldindex_path: str,\n    ld_radius: int,\n    grch37_to_grch38_chain_path: str,\n) -> LDIndex:\n\"\"\"Parse LD index and annotate with interval start and stop.\n\n    Args:\n        pop_ldindex_path (str): path to gnomAD LD index\n        ld_radius (int): radius\n        grch37_to_grch38_chain_path (str): path to chain file for liftover\n\n    Returns:\n        LDIndex: Created GnomAD LD index\n    \"\"\"\n    ld_index = hl.read_table(pop_ldindex_path).naive_coalesce(400)\n    ld_index_38 = LDIndex._liftover_loci(ld_index, grch37_to_grch38_chain_path)\n\n    return cls(\n        _df=ld_index_38.to_spark()\n        .filter(f.col(\"`locus38.position`\").isNotNull())\n        .select(\n            f.coalesce(f.col(\"idx\"), f.monotonically_increasing_id()).alias(\"idx\"),\n            f.coalesce(\n                f.regexp_replace(\"`locus38.contig`\", \"chr\", \"\"), f.lit(\"unknown\")\n            ).alias(\"chromosome\"),\n            f.coalesce(f.col(\"`locus38.position`\"), f.lit(-1)).alias(\"position\"),\n            f.coalesce(f.col(\"`alleles`\").getItem(0), f.lit(\"?\")).alias(\n                \"referenceAllele\"\n            ),\n            f.coalesce(f.col(\"`alleles`\").getItem(1), f.lit(\"?\")).alias(\n                \"alternateAllele\"\n            ),\n        )\n        # Convert gnomad position to Ensembl position (1-based for indels)\n        .withColumn(\n            \"position\",\n            convert_gnomad_position_to_ensembl(\n                f.col(\"position\"),\n                f.col(\"referenceAllele\"),\n                f.col(\"alternateAllele\"),\n            ),\n        )\n        .withColumn(\n            \"variantId\",\n            f.concat_ws(\n                \"_\",\n                f.col(\"chromosome\"),\n                f.col(\"position\"),\n                f.col(\"referenceAllele\"),\n                f.col(\"alternateAllele\"),\n            ),\n        )\n        # Filter out variants mapping to several indices due to liftover\n        .withColumn(\"count\", f.count(\"*\").over(Window.partitionBy([\"variantId\"])))\n        .filter(f.col(\"count\") == 1)\n        .drop(\"count\")\n        .withColumn(\"start_idx\", f.lit(None).cast(t.LongType()))\n        .withColumn(\"stop_idx\", f.lit(None).cast(t.LongType()))\n        .repartition(400, \"chromosome\")\n        .sortWithinPartitions(\"position\")\n        .persist()\n    ).annotate_index_intervals(ld_radius)\n
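The variantId built above is simply chromosome, position and the two alleles joined with underscores, after stripping the "chr" prefix. A toy illustration of that construction; the indel position shift performed by convert_gnomad_position_to_ensembl is not reproduced here:

from pyspark.sql import SparkSession, functions as f

spark = SparkSession.builder.getOrCreate()
df = spark.createDataFrame(
    [("chr21", 1000, "A", "G")],
    ["contig", "position", "referenceAllele", "alternateAllele"],
)
df.select(
    f.regexp_replace(f.col("contig"), "chr", "").alias("chromosome"),
    "position",
    "referenceAllele",
    "alternateAllele",
).withColumn(
    "variantId",
    f.concat_ws(
        "_",
        f.col("chromosome"),
        f.col("position"),
        f.col("referenceAllele"),
        f.col("alternateAllele"),
    ),
).show()
# variantId becomes "21_1000_A_G".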
"},{"location":"components/dataset/ld_index/#otg.dataset.ld_index.LDIndex.from_parquet","title":"from_parquet(session, path) classmethod","text":"

Initialise LD index from parquet file.

Parameters:

Name     Type     Description           Default
session  Session  ETL session           required
path     str      Path to parquet file  required

Returns:

Name     Type     Description
LDIndex  LDIndex  LD index dataset

Source code in src/otg/dataset/ld_index.py
@classmethod\ndef from_parquet(cls: type[LDIndex], session: Session, path: str) -> LDIndex:\n\"\"\"Initialise LD index from parquet file.\n\n    Args:\n        session (Session): ETL session\n        path (str): Path to parquet file\n\n    Returns:\n        LDIndex: LD index dataset\n    \"\"\"\n    df = session.read_parquet(path=path, schema=cls._schema)\n    return cls(_df=df, _schema=cls._schema)\n
"},{"location":"components/dataset/ld_index/#schema","title":"Schema","text":"
root\n |-- variantId: string (nullable = false)\n |-- chromosome: string (nullable = false)\n |-- position: integer (nullable = false)\n |-- referenceAllele: string (nullable = false)\n |-- alternateAllele: string (nullable = false)\n |-- idx: long (nullable = false)\n |-- start_idx: long (nullable = true)\n |-- stop_idx: long (nullable = true)\n
"},{"location":"components/dataset/study_locus_overlap/","title":"Study locus overlap","text":"

Bases: Dataset

Study-Locus overlap.

This dataset captures pairs of overlapping StudyLocus.

Source code in src/otg/dataset/study_locus_overlap.py
@dataclass\nclass StudyLocusOverlap(Dataset):\n\"\"\"Study-Locus overlap.\n\n    This dataset captures pairs of overlapping `StudyLocus`.\n    \"\"\"\n\n    _schema: StructType = parse_spark_schema(\"study_locus_overlap.json\")\n\n    @classmethod\n    def from_parquet(\n        cls: type[StudyLocusOverlap], session: Session, path: str\n    ) -> StudyLocusOverlap:\n\"\"\"Initialise StudyLocusOverlap from parquet file.\n\n        Args:\n            session (Session): ETL session\n            path (str): Path to parquet file\n\n        Returns:\n            StudyLocusOverlap: Study-locus overlap dataset\n        \"\"\"\n        df = session.read_parquet(path=path, schema=cls._schema)\n        return cls(_df=df, _schema=cls._schema)\n
"},{"location":"components/dataset/study_locus_overlap/#otg.dataset.study_locus_overlap.StudyLocusOverlap.from_parquet","title":"from_parquet(session, path) classmethod","text":"

Initialise StudyLocusOverlap from parquet file.

Parameters:

Name     Type     Description           Default
session  Session  ETL session           required
path     str      Path to parquet file  required

Returns:

Name              Type              Description
StudyLocusOverlap StudyLocusOverlap Study-locus overlap dataset

Source code in src/otg/dataset/study_locus_overlap.py
@classmethod\ndef from_parquet(\n    cls: type[StudyLocusOverlap], session: Session, path: str\n) -> StudyLocusOverlap:\n\"\"\"Initialise StudyLocusOverlap from parquet file.\n\n    Args:\n        session (Session): ETL session\n        path (str): Path to parquet file\n\n    Returns:\n        StudyLocusOverlap: Study-locus overlap dataset\n    \"\"\"\n    df = session.read_parquet(path=path, schema=cls._schema)\n    return cls(_df=df, _schema=cls._schema)\n
"},{"location":"components/dataset/study_locus_overlap/#schema","title":"Schema","text":"
root\n |-- left_studyLocusId: long (nullable = false)\n |-- right_studyLocusId: long (nullable = false)\n |-- chromosome: string (nullable = false)\n |-- tagVariantId: string (nullable = false)\n |-- right_logABF: double (nullable = true)\n |-- left_logABF: double (nullable = true)\n |-- right_posteriorProbability: double (nullable = true)\n |-- left_posteriorProbability: double (nullable = true)\n
"},{"location":"components/dataset/summary_statistics/","title":"Summary statistics","text":"

Bases: Dataset

Summary Statistics dataset.

A summary statistics dataset contains all single point statistics resulting from a GWAS.

Source code in src/otg/dataset/summary_statistics.py
@dataclass\nclass SummaryStatistics(Dataset):\n\"\"\"Summary Statistics dataset.\n\n    A summary statistics dataset contains all single point statistics resulting from a GWAS.\n    \"\"\"\n\n    _schema: t.StructType = parse_spark_schema(\"summary_statistics.json\")\n\n    @staticmethod\n    def _convert_odds_ratio_to_beta(\n        beta: Column, odds_ratio: Column, standard_error: Column\n    ) -> tuple:\n\"\"\"Harmonizes effect and standard error to beta.\n\n        Args:\n            beta (Column): Effect in beta\n            odds_ratio (Column): Effect in odds ratio\n            standard_error (Column): Standard error of the effect\n\n        Returns:\n            tuple: beta, standard error\n\n        Examples:\n            >>> df = spark.createDataFrame([{\"beta\": 0.1, \"oddsRatio\": 1.1, \"standardError\": 0.1}, {\"beta\": None, \"oddsRatio\": 1.1, \"standardError\": 0.1}, {\"beta\": 0.1, \"oddsRatio\": None, \"standardError\": 0.1}, {\"beta\": 0.1, \"oddsRatio\": 1.1, \"standardError\": None}])\n            >>> df.select(\"*\", *SummaryStatistics._convert_odds_ratio_to_beta(f.col(\"beta\"), f.col(\"oddsRatio\"), f.col(\"standardError\"))).show()\n            +----+---------+-------------+-------------------+-------------+\n            |beta|oddsRatio|standardError|               beta|standardError|\n            +----+---------+-------------+-------------------+-------------+\n            | 0.1|      1.1|          0.1|                0.1|          0.1|\n            |null|      1.1|          0.1|0.09531017980432493|         null|\n            | 0.1|     null|          0.1|                0.1|          0.1|\n            | 0.1|      1.1|         null|                0.1|         null|\n            +----+---------+-------------+-------------------+-------------+\n            <BLANKLINE>\n\n        \"\"\"\n        # We keep standard error when effect is given in beta, otherwise drop.\n        standard_error = f.when(\n            standard_error.isNotNull() & beta.isNotNull(), standard_error\n        ).alias(\"standardError\")\n\n        # Odds ratio is converted to beta:\n        beta = (\n            f.when(beta.isNotNull(), beta)\n            .when(odds_ratio.isNotNull(), f.log(odds_ratio))\n            .alias(\"beta\")\n        )\n\n        return (beta, standard_error)\n\n    @staticmethod\n    def _calculate_confidence_interval(\n        pvalue_mantissa: Column,\n        pvalue_exponent: Column,\n        beta: Column,\n        standard_error: Column,\n    ) -> tuple:\n\"\"\"This function calculates the confidence interval for the effect based on the p-value and the effect size.\n\n        If the standard error already available, don't re-calculate from p-value.\n\n        Args:\n            pvalue_mantissa (Column): p-value mantissa (float)\n            pvalue_exponent (Column): p-value exponent (integer)\n            beta (Column): effect size in beta (float)\n            standard_error (Column): standard error.\n\n        Returns:\n            tuple: betaConfidenceIntervalLower (float), betaConfidenceIntervalUpper (float)\n        \"\"\"\n        # Calculate p-value from mantissa and exponent:\n        pvalue = pvalue_mantissa * f.pow(10, pvalue_exponent)\n\n        # Fix p-value underflow:\n        pvalue = f.when(pvalue == 0, sys.float_info.min).otherwise(pvalue)\n\n        # Compute missing standard error:\n        standard_error = f.when(\n            standard_error.isNull(), f.abs(beta) / f.abs(pvalue_to_zscore(pvalue))\n        ).otherwise(standard_error)\n\n        # 
Calculate upper and lower confidence interval:\n        ci_lower = (beta - standard_error).alias(\"betaConfidenceIntervalLower\")\n        ci_upper = (beta + standard_error).alias(\"betaConfidenceIntervalUpper\")\n\n        return (ci_lower, ci_upper)\n\n    @classmethod\n    def from_parquet(\n        cls: type[SummaryStatistics], session: Session, path: str\n    ) -> SummaryStatistics:\n\"\"\"Initialise SummaryStatistics from parquet file.\n\n        Args:\n            session (Session): Session\n            path (str): Path to parquet file\n\n        Returns:\n            SummaryStatistics: SummaryStatistics dataset\n        \"\"\"\n        df = session.read_parquet(path=path, schema=cls._schema)\n        return cls(_df=df, _schema=cls._schema)\n\n    @classmethod\n    def from_gwas_harmonized_summary_stats(\n        cls: type[SummaryStatistics],\n        sumstats_df: DataFrame,\n        study_id: str,\n    ) -> SummaryStatistics:\n\"\"\"Create summary statistics object from summary statistics harmonized by the GWAS Catalog.\n\n        Args:\n            sumstats_df (DataFrame): Harmonized dataset read as dataframe from GWAS Catalog.\n            study_id (str): GWAS Catalog Study accession.\n\n        Returns:\n            SummaryStatistics\n        \"\"\"\n        # The effect allele frequency is an optional column, we have to test if it is there:\n        allele_frequency_expression = (\n            f.col(\"hm_effect_allele_frequency\").cast(t.DoubleType())\n            if \"hm_effect_allele_frequency\" in sumstats_df.columns\n            else f.lit(None)\n        )\n\n        # Processing columns of interest:\n        processed_sumstats_df = (\n            sumstats_df.select(\n                # Adding study identifier:\n                f.lit(study_id).cast(t.StringType()).alias(\"studyId\"),\n                # Adding variant identifier:\n                f.col(\"hm_variant_id\").alias(\"variantId\"),\n                f.col(\"hm_chrom\").alias(\"chromosome\"),\n                f.col(\"hm_pos\").cast(t.IntegerType()).alias(\"position\"),\n                # Parsing p-value mantissa and exponent:\n                *parse_pvalue(f.col(\"p_value\").cast(t.FloatType())),\n                # Converting/calculating effect and confidence interval:\n                *cls._convert_odds_ratio_to_beta(\n                    f.col(\"hm_beta\").cast(t.DoubleType()),\n                    f.col(\"hm_odds_ratio\").cast(t.DoubleType()),\n                    f.col(\"standard_error\").cast(t.DoubleType()),\n                ),\n                allele_frequency_expression.alias(\"effectAlleleFrequencyFromSource\"),\n            )\n            .repartition(200, \"chromosome\")\n            .sortWithinPartitions(\"position\")\n        )\n\n        # Initializing summary statistics object:\n        return cls(\n            _df=processed_sumstats_df,\n        )\n\n    def calculate_confidence_interval(self: SummaryStatistics) -> SummaryStatistics:\n\"\"\"A Function to add upper and lower confidence interval to a summary statistics dataset.\n\n        Returns:\n            SummaryStatistics:\n        \"\"\"\n        columns = self._df.columns\n\n        # If confidence interval has already been calculated skip:\n        if (\n            \"betaConfidenceIntervalLower\" in columns\n            and \"betaConfidenceIntervalUpper\" in columns\n        ):\n            return self\n\n        # Calculate CI:\n        return SummaryStatistics(\n            _df=(\n                self._df.select(\n                    
\"*\",\n                    *self._calculate_confidence_interval(\n                        f.col(\"pValueMantissa\"),\n                        f.col(\"pValueExponent\"),\n                        f.col(\"beta\"),\n                        f.col(\"standardError\"),\n                    ),\n                )\n            )\n        )\n\n    def pvalue_filter(self: SummaryStatistics, pvalue: float) -> SummaryStatistics:\n\"\"\"Filter summary statistics based on the provided p-value threshold.\n\n        Args:\n            pvalue (float): upper limit of the p-value to be filtered upon.\n\n        Returns:\n            SummaryStatistics: summary statistics object containing single point associations with p-values at least as significant as the provided threshold.\n        \"\"\"\n        # Converting p-value to mantissa and exponent:\n        (mantissa, exponent) = split_pvalue(pvalue)\n\n        # Applying filter:\n        df = self._df.filter(\n            (f.col(\"pValueExponent\") < exponent)\n            | (\n                (f.col(\"pValueExponent\") == exponent)\n                & (f.col(\"pValueMantissa\") <= mantissa)\n            )\n        )\n        return SummaryStatistics(_df=df)\n\n    def window_based_clumping(self: SummaryStatistics, distance: int) -> StudyLocus:\n\"\"\"Perform distance-based clumping.\n\n        Args:\n            distance (int): Distance in base pairs\n\n        Returns:\n            StudyLocus: StudyLocus object\n        \"\"\"\n        # Calculate distance-based clumping:\n        return WindowBasedClumping.clump(self, distance)\n
"},{"location":"components/dataset/summary_statistics/#otg.dataset.summary_statistics.SummaryStatistics.calculate_confidence_interval","title":"calculate_confidence_interval()","text":"

A function to add upper and lower confidence intervals to a summary statistics dataset.

Returns:

  • SummaryStatistics (SummaryStatistics)

Source code in src/otg/dataset/summary_statistics.py
def calculate_confidence_interval(self: SummaryStatistics) -> SummaryStatistics:\n\"\"\"A Function to add upper and lower confidence interval to a summary statistics dataset.\n\n    Returns:\n        SummaryStatistics:\n    \"\"\"\n    columns = self._df.columns\n\n    # If confidence interval has already been calculated skip:\n    if (\n        \"betaConfidenceIntervalLower\" in columns\n        and \"betaConfidenceIntervalUpper\" in columns\n    ):\n        return self\n\n    # Calculate CI:\n    return SummaryStatistics(\n        _df=(\n            self._df.select(\n                \"*\",\n                *self._calculate_confidence_interval(\n                    f.col(\"pValueMantissa\"),\n                    f.col(\"pValueExponent\"),\n                    f.col(\"beta\"),\n                    f.col(\"standardError\"),\n                ),\n            )\n        )\n    )\n
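Example (an illustrative sketch, not part of the source: the Session import path, its constructor, and the parquet location are assumptions):

    # Hypothetical usage; paths and the Session constructor are placeholders.
    from otg.common.session import Session          # assumed module path
    from otg.dataset.summary_statistics import SummaryStatistics

    session = Session()                              # construction details depend on the pipeline configuration
    sumstats = SummaryStatistics.from_parquet(session, "gs://bucket/summary_statistics")
    sumstats_with_ci = sumstats.calculate_confidence_interval()
    # Calling it again is a no-op because the confidence interval columns are already present.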
"},{"location":"components/dataset/summary_statistics/#otg.dataset.summary_statistics.SummaryStatistics.from_gwas_harmonized_summary_stats","title":"from_gwas_harmonized_summary_stats(sumstats_df, study_id) classmethod","text":"

Create summary statistics object from summary statistics harmonized by the GWAS Catalog.

Parameters:

  • sumstats_df (DataFrame): Harmonized dataset read as dataframe from GWAS Catalog. Required.
  • study_id (str): GWAS Catalog Study accession. Required.

Returns:

  • SummaryStatistics: SummaryStatistics

Source code in src/otg/dataset/summary_statistics.py
@classmethod\ndef from_gwas_harmonized_summary_stats(\n    cls: type[SummaryStatistics],\n    sumstats_df: DataFrame,\n    study_id: str,\n) -> SummaryStatistics:\n\"\"\"Create summary statistics object from summary statistics harmonized by the GWAS Catalog.\n\n    Args:\n        sumstats_df (DataFrame): Harmonized dataset read as dataframe from GWAS Catalog.\n        study_id (str): GWAS Catalog Study accession.\n\n    Returns:\n        SummaryStatistics\n    \"\"\"\n    # The effect allele frequency is an optional column, we have to test if it is there:\n    allele_frequency_expression = (\n        f.col(\"hm_effect_allele_frequency\").cast(t.DoubleType())\n        if \"hm_effect_allele_frequency\" in sumstats_df.columns\n        else f.lit(None)\n    )\n\n    # Processing columns of interest:\n    processed_sumstats_df = (\n        sumstats_df.select(\n            # Adding study identifier:\n            f.lit(study_id).cast(t.StringType()).alias(\"studyId\"),\n            # Adding variant identifier:\n            f.col(\"hm_variant_id\").alias(\"variantId\"),\n            f.col(\"hm_chrom\").alias(\"chromosome\"),\n            f.col(\"hm_pos\").cast(t.IntegerType()).alias(\"position\"),\n            # Parsing p-value mantissa and exponent:\n            *parse_pvalue(f.col(\"p_value\").cast(t.FloatType())),\n            # Converting/calculating effect and confidence interval:\n            *cls._convert_odds_ratio_to_beta(\n                f.col(\"hm_beta\").cast(t.DoubleType()),\n                f.col(\"hm_odds_ratio\").cast(t.DoubleType()),\n                f.col(\"standard_error\").cast(t.DoubleType()),\n            ),\n            allele_frequency_expression.alias(\"effectAlleleFrequencyFromSource\"),\n        )\n        .repartition(200, \"chromosome\")\n        .sortWithinPartitions(\"position\")\n    )\n\n    # Initializing summary statistics object:\n    return cls(\n        _df=processed_sumstats_df,\n    )\n
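Example (a minimal sketch; the file path, study accession and exact column layout of the harmonized TSV are placeholders):

    from pyspark.sql import SparkSession
    from otg.dataset.summary_statistics import SummaryStatistics

    spark = SparkSession.builder.getOrCreate()
    # GWAS Catalog harmonized files are tab-separated and carry hm_* columns
    # (hm_variant_id, hm_chrom, hm_pos, hm_beta, hm_odds_ratio, ...).
    raw_sumstats = spark.read.csv("path/to/GCSTXXXXXX.h.tsv.gz", sep="\t", header=True)
    sumstats = SummaryStatistics.from_gwas_harmonized_summary_stats(raw_sumstats, study_id="GCSTXXXXXX")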
"},{"location":"components/dataset/summary_statistics/#otg.dataset.summary_statistics.SummaryStatistics.from_parquet","title":"from_parquet(session, path) classmethod","text":"

Initialise SummaryStatistics from parquet file.

Parameters:

  • session (Session): Session. Required.
  • path (str): Path to parquet file. Required.

Returns:

  • SummaryStatistics (SummaryStatistics): SummaryStatistics dataset

Source code in src/otg/dataset/summary_statistics.py
@classmethod\ndef from_parquet(\n    cls: type[SummaryStatistics], session: Session, path: str\n) -> SummaryStatistics:\n\"\"\"Initialise SummaryStatistics from parquet file.\n\n    Args:\n        session (Session): Session\n        path (str): Path to parquet file\n\n    Returns:\n        SummaryStatistics: SummaryStatistics dataset\n    \"\"\"\n    df = session.read_parquet(path=path, schema=cls._schema)\n    return cls(_df=df, _schema=cls._schema)\n
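Example (a sketch assuming a configured Session; the import path and parquet location are placeholders):

    from otg.common.session import Session          # assumed module path
    from otg.dataset.summary_statistics import SummaryStatistics

    session = Session()
    sumstats = SummaryStatistics.from_parquet(session, path="gs://bucket/summary_statistics")
    # The wrapped Spark DataFrame is validated against summary_statistics.json and exposed as .df
    sumstats.df.printSchema()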
"},{"location":"components/dataset/summary_statistics/#otg.dataset.summary_statistics.SummaryStatistics.pvalue_filter","title":"pvalue_filter(pvalue)","text":"

Filter summary statistics based on the provided p-value threshold.

Parameters:

  • pvalue (float): Upper limit of the p-value to be filtered upon. Required.

Returns:

  • SummaryStatistics (SummaryStatistics): Summary statistics object containing single point associations with p-values at least as significant as the provided threshold.

Source code in src/otg/dataset/summary_statistics.py
def pvalue_filter(self: SummaryStatistics, pvalue: float) -> SummaryStatistics:\n\"\"\"Filter summary statistics based on the provided p-value threshold.\n\n    Args:\n        pvalue (float): upper limit of the p-value to be filtered upon.\n\n    Returns:\n        SummaryStatistics: summary statistics object containing single point associations with p-values at least as significant as the provided threshold.\n    \"\"\"\n    # Converting p-value to mantissa and exponent:\n    (mantissa, exponent) = split_pvalue(pvalue)\n\n    # Applying filter:\n    df = self._df.filter(\n        (f.col(\"pValueExponent\") < exponent)\n        | (\n            (f.col(\"pValueExponent\") == exponent)\n            & (f.col(\"pValueMantissa\") <= mantissa)\n        )\n    )\n    return SummaryStatistics(_df=df)\n
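Example (sketch, reusing the sumstats object from the from_parquet example above; 5e-8 is the conventional genome-wide significance threshold):

    # Keep only genome-wide significant single point associations (p <= 5e-8).
    significant = sumstats.pvalue_filter(5e-8)
    # The threshold is split into mantissa and exponent, so the filter runs directly
    # on the pValueMantissa and pValueExponent columns.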
"},{"location":"components/dataset/summary_statistics/#otg.dataset.summary_statistics.SummaryStatistics.window_based_clumping","title":"window_based_clumping(distance)","text":"

Perform distance-based clumping.

Parameters:

  • distance (int): Distance in base pairs. Required.

Returns:

  • StudyLocus (StudyLocus): StudyLocus object

Source code in src/otg/dataset/summary_statistics.py
def window_based_clumping(self: SummaryStatistics, distance: int) -> StudyLocus:\n\"\"\"Perform distance-based clumping.\n\n    Args:\n        distance (int): Distance in base pairs\n\n    Returns:\n        StudyLocus: StudyLocus object\n    \"\"\"\n    # Calculate distance-based clumping:\n    return WindowBasedClumping.clump(self, distance)\n
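Example (sketch, continuing from the examples above; the 500 kb window is an illustrative choice, not a pipeline default):

    # Distance-based clumping of the summary statistics into study-locus candidates.
    study_locus = sumstats.window_based_clumping(distance=500_000)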
"},{"location":"components/dataset/summary_statistics/#schema","title":"Schema","text":"
root\n |-- studyId: string (nullable = false)\n |-- variantId: string (nullable = false)\n |-- chromosome: string (nullable = false)\n |-- position: integer (nullable = false)\n |-- beta: double (nullable = false)\n |-- betaConfidenceIntervalLower: double (nullable = true)\n |-- betaConfidenceIntervalUpper: double (nullable = true)\n |-- pValueMantissa: float (nullable = false)\n |-- pValueExponent: integer (nullable = false)\n |-- effectAlleleFrequencyFromSource: double (nullable = true)\n |-- standardError: double (nullable = true)\n
"},{"location":"components/dataset/variant_annotation/","title":"Variant annotation","text":"

Bases: Dataset

Dataset with variant-level annotations derived from GnomAD.

Source code in src/otg/dataset/variant_annotation.py
@dataclass\nclass VariantAnnotation(Dataset):\n\"\"\"Dataset with variant-level annotations derived from GnomAD.\"\"\"\n\n    _schema: StructType = parse_spark_schema(\"variant_annotation.json\")\n\n    @classmethod\n    def from_parquet(\n        cls: type[VariantAnnotation], session: Session, path: str\n    ) -> VariantAnnotation:\n\"\"\"Initialise VariantAnnotation from parquet file.\n\n        Args:\n            session (Session): ETL session\n            path (str): Path to parquet file\n\n        Returns:\n            VariantAnnotation: VariantAnnotation dataset\n        \"\"\"\n        df = session.read_parquet(path=path, schema=cls._schema)\n        return cls(_df=df, _schema=cls._schema)\n\n    @classmethod\n    def from_gnomad(\n        cls: type[VariantAnnotation],\n        gnomad_file: str,\n        grch38_to_grch37_chain: str,\n        populations: list,\n    ) -> VariantAnnotation:\n\"\"\"Generate variant annotation dataset from gnomAD.\n\n        Some relevant modifications to the original dataset are:\n\n        1. The transcript consequences features provided by VEP are filtered to only refer to the Ensembl canonical transcript.\n        2. Genome coordinates are liftovered from GRCh38 to GRCh37 to keep as annotation.\n        3. Field names are converted to camel case to follow the convention.\n\n        Args:\n            gnomad_file (str): Path to `gnomad.genomes.vX.X.X.sites.ht` gnomAD dataset\n            grch38_to_grch37_chain (str): Path to chain file for liftover\n            populations (list): List of populations to include in the dataset\n\n        Returns:\n            VariantAnnotation: Variant annotation dataset\n        \"\"\"\n        # Load variants dataset\n        ht = hl.read_table(\n            gnomad_file,\n            _load_refs=False,\n        )\n\n        # Liftover\n        grch37 = hl.get_reference(\"GRCh37\")\n        grch38 = hl.get_reference(\"GRCh38\")\n        grch38.add_liftover(grch38_to_grch37_chain, grch37)\n\n        # Drop non biallelic variants\n        ht = ht.filter(ht.alleles.length() == 2)\n        # Liftover\n        ht = ht.annotate(locus_GRCh37=hl.liftover(ht.locus, \"GRCh37\"))\n        # Select relevant fields and nested records to create class\n        return cls(\n            _df=(\n                ht.select(\n                    gnomad3VariantId=hl.str(\"-\").join(\n                        [\n                            ht.locus.contig.replace(\"chr\", \"\"),\n                            hl.str(ht.locus.position),\n                            ht.alleles[0],\n                            ht.alleles[1],\n                        ]\n                    ),\n                    chromosome=ht.locus.contig.replace(\"chr\", \"\"),\n                    position=convert_gnomad_position_to_ensembl_hail(\n                        ht.locus.position, ht.alleles[0], ht.alleles[1]\n                    ),\n                    variantId=hl.str(\"_\").join(\n                        [\n                            ht.locus.contig.replace(\"chr\", \"\"),\n                            hl.str(\n                                convert_gnomad_position_to_ensembl_hail(\n                                    ht.locus.position, ht.alleles[0], ht.alleles[1]\n                                )\n                            ),\n                            ht.alleles[0],\n                            ht.alleles[1],\n                        ]\n                    ),\n                    chromosomeB37=ht.locus_GRCh37.contig.replace(\"chr\", \"\"),\n                  
  positionB37=ht.locus_GRCh37.position,\n                    referenceAllele=ht.alleles[0],\n                    alternateAllele=ht.alleles[1],\n                    rsIds=ht.rsid,\n                    alleleType=ht.allele_info.allele_type,\n                    cadd=hl.struct(\n                        phred=ht.cadd.phred,\n                        raw=ht.cadd.raw_score,\n                    ),\n                    alleleFrequencies=hl.set([f\"{pop}-adj\" for pop in populations]).map(\n                        lambda p: hl.struct(\n                            populationName=p,\n                            alleleFrequency=ht.freq[ht.globals.freq_index_dict[p]].AF,\n                        )\n                    ),\n                    vep=hl.struct(\n                        mostSevereConsequence=ht.vep.most_severe_consequence,\n                        transcriptConsequences=hl.map(\n                            lambda x: hl.struct(\n                                aminoAcids=x.amino_acids,\n                                consequenceTerms=x.consequence_terms,\n                                geneId=x.gene_id,\n                                lof=x.lof,\n                                polyphenScore=x.polyphen_score,\n                                polyphenPrediction=x.polyphen_prediction,\n                                siftScore=x.sift_score,\n                                siftPrediction=x.sift_prediction,\n                            ),\n                            # Only keeping canonical transcripts\n                            ht.vep.transcript_consequences.filter(\n                                lambda x: (x.canonical == 1)\n                                & (x.gene_symbol_source == \"HGNC\")\n                            ),\n                        ),\n                    ),\n                )\n                .key_by(\"chromosome\", \"position\")\n                .drop(\"locus\", \"alleles\")\n                .select_globals()\n                .to_spark(flatten=False)\n            )\n        )\n\n    def persist(self: VariantAnnotation) -> VariantAnnotation:\n\"\"\"Persist DataFrame included in the Dataset.\"\"\"\n        self.df = self._df.persist()\n        return self\n\n    def max_maf(self: VariantAnnotation) -> Column:\n\"\"\"Maximum minor allele frequency accross all populations.\n\n        Returns:\n            Column: Maximum minor allele frequency accross all populations.\n        \"\"\"\n        return f.array_max(\n            f.transform(\n                self.df.alleleFrequencies,\n                lambda af: f.when(\n                    af.alleleFrequency > 0.5, 1 - af.alleleFrequency\n                ).otherwise(af.alleleFrequency),\n            )\n        )\n\n    def filter_by_variant_df(\n        self: VariantAnnotation, df: DataFrame, cols: list[str]\n    ) -> VariantAnnotation:\n\"\"\"Filter variant annotation dataset by a variant dataframe.\n\n        Args:\n            df (DataFrame): A dataframe of variants\n            cols (List[str]): A list of columns to join on\n\n        Returns:\n            VariantAnnotation: A filtered variant annotation dataset\n        \"\"\"\n        self.df = self._df.join(f.broadcast(df.select(cols)), on=cols, how=\"inner\")\n        return self\n\n    def get_transcript_consequence_df(\n        self: VariantAnnotation, filter_by: Optional[GeneIndex] = None\n    ) -> DataFrame:\n\"\"\"Dataframe of exploded transcript consequences.\n\n        Optionally the trancript consequences can be reduced to the universe of a gene index.\n\n  
      Args:\n            filter_by (GeneIndex): A gene index. Defaults to None.\n\n        Returns:\n            DataFrame: A dataframe exploded by transcript consequences with the columns variantId, chromosome, transcriptConsequence\n        \"\"\"\n        # exploding the array removes records without VEP annotation\n        transript_consequences = self.df.withColumn(\n            \"transcriptConsequence\", f.explode(\"vep.transcriptConsequences\")\n        ).select(\n            \"variantId\",\n            \"chromosome\",\n            \"position\",\n            \"transcriptConsequence\",\n            f.col(\"transcriptConsequence.geneId\").alias(\"geneId\"),\n        )\n        if filter_by:\n            transript_consequences = transript_consequences.join(\n                f.broadcast(filter_by.df),\n                on=[\"chromosome\", \"geneId\"],\n            )\n        return transript_consequences.persist()\n\n    def get_most_severe_vep_v2g(\n        self: VariantAnnotation,\n        vep_consequences: DataFrame,\n        filter_by: GeneIndex,\n    ) -> V2G:\n\"\"\"Creates a dataset with variant to gene assignments based on VEP's predicted consequence on the transcript.\n\n        Optionally the trancript consequences can be reduced to the universe of a gene index.\n\n        Args:\n            vep_consequences (DataFrame): A dataframe of VEP consequences\n            filter_by (GeneIndex): A gene index to filter by. Defaults to None.\n\n        Returns:\n            V2G: High and medium severity variant to gene assignments\n        \"\"\"\n        vep_lut = vep_consequences.select(\n            f.element_at(f.split(\"Accession\", r\"/\"), -1).alias(\n                \"variantFunctionalConsequenceId\"\n            ),\n            f.col(\"Term\").alias(\"label\"),\n            f.col(\"v2g_score\").cast(\"double\").alias(\"score\"),\n        )\n\n        return V2G(\n            _df=self.get_transcript_consequence_df(filter_by)\n            .select(\n                \"variantId\",\n                \"chromosome\",\n                \"position\",\n                f.col(\"transcriptConsequence.geneId\").alias(\"geneId\"),\n                f.explode(\"transcriptConsequence.consequenceTerms\").alias(\"label\"),\n                f.lit(\"vep\").alias(\"datatypeId\"),\n                f.lit(\"variantConsequence\").alias(\"datasourceId\"),\n            )\n            # A variant can have multiple predicted consequences on a transcript, the most severe one is selected\n            .join(\n                f.broadcast(vep_lut),\n                on=\"label\",\n                how=\"inner\",\n            )\n            .filter(f.col(\"score\") != 0)\n            .transform(\n                lambda df: get_record_with_maximum_value(\n                    df, [\"variantId\", \"geneId\"], \"score\"\n                )\n            )\n        )\n\n    def get_polyphen_v2g(\n        self: VariantAnnotation, filter_by: Optional[GeneIndex] = None\n    ) -> V2G:\n\"\"\"Creates a dataset with variant to gene assignments with a PolyPhen's predicted score on the transcript.\n\n        Polyphen informs about the probability that a substitution is damaging. Optionally the trancript consequences can be reduced to the universe of a gene index.\n\n        Args:\n            filter_by (GeneIndex): A gene index to filter by. 
Defaults to None.\n\n        Returns:\n            V2G: variant to gene assignments with their polyphen scores\n        \"\"\"\n        return V2G(\n            _df=self.get_transcript_consequence_df(filter_by)\n            .filter(f.col(\"transcriptConsequence.polyphenScore\").isNotNull())\n            .select(\n                \"variantId\",\n                \"chromosome\",\n                \"position\",\n                \"geneId\",\n                f.col(\"transcriptConsequence.polyphenScore\").alias(\"score\"),\n                f.col(\"transcriptConsequence.polyphenPrediction\").alias(\"label\"),\n                f.lit(\"vep\").alias(\"datatypeId\"),\n                f.lit(\"polyphen\").alias(\"datasourceId\"),\n            )\n        )\n\n    def get_sift_v2g(self: VariantAnnotation, filter_by: GeneIndex) -> V2G:\n\"\"\"Creates a dataset with variant to gene assignments with a SIFT's predicted score on the transcript.\n\n        SIFT informs about the probability that a substitution is tolerated so scores nearer zero are more likely to be deleterious.\n        Optionally the trancript consequences can be reduced to the universe of a gene index.\n\n        Args:\n            filter_by (GeneIndex): A gene index to filter by.\n\n        Returns:\n            V2G: variant to gene assignments with their SIFT scores\n        \"\"\"\n        return V2G(\n            _df=self.get_transcript_consequence_df(filter_by)\n            .filter(f.col(\"transcriptConsequence.siftScore\").isNotNull())\n            .select(\n                \"variantId\",\n                \"chromosome\",\n                \"position\",\n                \"geneId\",\n                f.expr(\"1 - transcriptConsequence.siftScore\").alias(\"score\"),\n                f.col(\"transcriptConsequence.siftPrediction\").alias(\"label\"),\n                f.lit(\"vep\").alias(\"datatypeId\"),\n                f.lit(\"sift\").alias(\"datasourceId\"),\n            )\n        )\n\n    def get_plof_v2g(self: VariantAnnotation, filter_by: GeneIndex) -> V2G:\n\"\"\"Creates a dataset with variant to gene assignments with a flag indicating if the variant is predicted to be a loss-of-function variant by the LOFTEE algorithm.\n\n        Optionally the trancript consequences can be reduced to the universe of a gene index.\n\n        Args:\n            filter_by (GeneIndex): A gene index to filter by.\n\n        Returns:\n            V2G: variant to gene assignments from the LOFTEE algorithm\n        \"\"\"\n        return V2G(\n            _df=self.get_transcript_consequence_df(filter_by)\n            .filter(f.col(\"transcriptConsequence.lof\").isNotNull())\n            .withColumn(\n                \"isHighQualityPlof\",\n                f.when(f.col(\"transcriptConsequence.lof\") == \"HC\", True).when(\n                    f.col(\"transcriptConsequence.lof\") == \"LC\", False\n                ),\n            )\n            .withColumn(\n                \"score\",\n                f.when(f.col(\"isHighQualityPlof\"), 1.0).when(\n                    ~f.col(\"isHighQualityPlof\"), 0\n                ),\n            )\n            .select(\n                \"variantId\",\n                \"chromosome\",\n                \"position\",\n                \"geneId\",\n                \"isHighQualityPlof\",\n                f.col(\"score\"),\n                f.lit(\"vep\").alias(\"datatypeId\"),\n                f.lit(\"loftee\").alias(\"datasourceId\"),\n            )\n        )\n\n    def get_distance_to_tss(\n        self: VariantAnnotation,\n     
   filter_by: GeneIndex,\n        max_distance: int = 500_000,\n    ) -> V2G:\n\"\"\"Extracts variant to gene assignments for variants falling within a window of a gene's TSS.\n\n        Args:\n            filter_by (GeneIndex): A gene index to filter by.\n            max_distance (int): The maximum distance from the TSS to consider. Defaults to 500_000.\n\n        Returns:\n            V2G: variant to gene assignments with their distance to the TSS\n        \"\"\"\n        return V2G(\n            _df=self.df.alias(\"variant\")\n            .join(\n                f.broadcast(filter_by.locations_lut()).alias(\"gene\"),\n                on=[\n                    f.col(\"variant.chromosome\") == f.col(\"gene.chromosome\"),\n                    f.abs(f.col(\"variant.position\") - f.col(\"gene.tss\"))\n                    <= max_distance,\n                ],\n                how=\"inner\",\n            )\n            .withColumn(\n                \"inverse_distance\",\n                max_distance - f.abs(f.col(\"variant.position\") - f.col(\"gene.tss\")),\n            )\n            .transform(lambda df: normalise_column(df, \"inverse_distance\", \"score\"))\n            .select(\n                \"variantId\",\n                f.col(\"variant.chromosome\").alias(\"chromosome\"),\n                \"position\",\n                \"geneId\",\n                \"score\",\n                f.lit(\"distance\").alias(\"datatypeId\"),\n                f.lit(\"canonical_tss\").alias(\"datasourceId\"),\n            )\n        )\n
"},{"location":"components/dataset/variant_annotation/#otg.dataset.variant_annotation.VariantAnnotation.filter_by_variant_df","title":"filter_by_variant_df(df, cols)","text":"

Filter variant annotation dataset by a variant dataframe.

Parameters:

  • df (DataFrame): A dataframe of variants. Required.
  • cols (List[str]): A list of columns to join on. Required.

Returns:

  • VariantAnnotation (VariantAnnotation): A filtered variant annotation dataset

Source code in src/otg/dataset/variant_annotation.py
def filter_by_variant_df(\n    self: VariantAnnotation, df: DataFrame, cols: list[str]\n) -> VariantAnnotation:\n\"\"\"Filter variant annotation dataset by a variant dataframe.\n\n    Args:\n        df (DataFrame): A dataframe of variants\n        cols (List[str]): A list of columns to join on\n\n    Returns:\n        VariantAnnotation: A filtered variant annotation dataset\n    \"\"\"\n    self.df = self._df.join(f.broadcast(df.select(cols)), on=cols, how=\"inner\")\n    return self\n
"},{"location":"components/dataset/variant_annotation/#otg.dataset.variant_annotation.VariantAnnotation.from_gnomad","title":"from_gnomad(gnomad_file, grch38_to_grch37_chain, populations) classmethod","text":"

Generate variant annotation dataset from gnomAD.

Some relevant modifications to the original dataset are:

  1. The transcript consequence features provided by VEP are filtered to only refer to the Ensembl canonical transcript.
  2. Genome coordinates are lifted over from GRCh38 to GRCh37 and kept as annotation.
  3. Field names are converted to camel case to follow the convention.

Parameters:

  • gnomad_file (str): Path to gnomad.genomes.vX.X.X.sites.ht gnomAD dataset. Required.
  • grch38_to_grch37_chain (str): Path to chain file for liftover. Required.
  • populations (list): List of populations to include in the dataset. Required.

Returns:

  • VariantAnnotation (VariantAnnotation): Variant annotation dataset

Source code in src/otg/dataset/variant_annotation.py
@classmethod\ndef from_gnomad(\n    cls: type[VariantAnnotation],\n    gnomad_file: str,\n    grch38_to_grch37_chain: str,\n    populations: list,\n) -> VariantAnnotation:\n\"\"\"Generate variant annotation dataset from gnomAD.\n\n    Some relevant modifications to the original dataset are:\n\n    1. The transcript consequences features provided by VEP are filtered to only refer to the Ensembl canonical transcript.\n    2. Genome coordinates are liftovered from GRCh38 to GRCh37 to keep as annotation.\n    3. Field names are converted to camel case to follow the convention.\n\n    Args:\n        gnomad_file (str): Path to `gnomad.genomes.vX.X.X.sites.ht` gnomAD dataset\n        grch38_to_grch37_chain (str): Path to chain file for liftover\n        populations (list): List of populations to include in the dataset\n\n    Returns:\n        VariantAnnotation: Variant annotation dataset\n    \"\"\"\n    # Load variants dataset\n    ht = hl.read_table(\n        gnomad_file,\n        _load_refs=False,\n    )\n\n    # Liftover\n    grch37 = hl.get_reference(\"GRCh37\")\n    grch38 = hl.get_reference(\"GRCh38\")\n    grch38.add_liftover(grch38_to_grch37_chain, grch37)\n\n    # Drop non biallelic variants\n    ht = ht.filter(ht.alleles.length() == 2)\n    # Liftover\n    ht = ht.annotate(locus_GRCh37=hl.liftover(ht.locus, \"GRCh37\"))\n    # Select relevant fields and nested records to create class\n    return cls(\n        _df=(\n            ht.select(\n                gnomad3VariantId=hl.str(\"-\").join(\n                    [\n                        ht.locus.contig.replace(\"chr\", \"\"),\n                        hl.str(ht.locus.position),\n                        ht.alleles[0],\n                        ht.alleles[1],\n                    ]\n                ),\n                chromosome=ht.locus.contig.replace(\"chr\", \"\"),\n                position=convert_gnomad_position_to_ensembl_hail(\n                    ht.locus.position, ht.alleles[0], ht.alleles[1]\n                ),\n                variantId=hl.str(\"_\").join(\n                    [\n                        ht.locus.contig.replace(\"chr\", \"\"),\n                        hl.str(\n                            convert_gnomad_position_to_ensembl_hail(\n                                ht.locus.position, ht.alleles[0], ht.alleles[1]\n                            )\n                        ),\n                        ht.alleles[0],\n                        ht.alleles[1],\n                    ]\n                ),\n                chromosomeB37=ht.locus_GRCh37.contig.replace(\"chr\", \"\"),\n                positionB37=ht.locus_GRCh37.position,\n                referenceAllele=ht.alleles[0],\n                alternateAllele=ht.alleles[1],\n                rsIds=ht.rsid,\n                alleleType=ht.allele_info.allele_type,\n                cadd=hl.struct(\n                    phred=ht.cadd.phred,\n                    raw=ht.cadd.raw_score,\n                ),\n                alleleFrequencies=hl.set([f\"{pop}-adj\" for pop in populations]).map(\n                    lambda p: hl.struct(\n                        populationName=p,\n                        alleleFrequency=ht.freq[ht.globals.freq_index_dict[p]].AF,\n                    )\n                ),\n                vep=hl.struct(\n                    mostSevereConsequence=ht.vep.most_severe_consequence,\n                    transcriptConsequences=hl.map(\n                        lambda x: hl.struct(\n                            aminoAcids=x.amino_acids,\n                            
consequenceTerms=x.consequence_terms,\n                            geneId=x.gene_id,\n                            lof=x.lof,\n                            polyphenScore=x.polyphen_score,\n                            polyphenPrediction=x.polyphen_prediction,\n                            siftScore=x.sift_score,\n                            siftPrediction=x.sift_prediction,\n                        ),\n                        # Only keeping canonical transcripts\n                        ht.vep.transcript_consequences.filter(\n                            lambda x: (x.canonical == 1)\n                            & (x.gene_symbol_source == \"HGNC\")\n                        ),\n                    ),\n                ),\n            )\n            .key_by(\"chromosome\", \"position\")\n            .drop(\"locus\", \"alleles\")\n            .select_globals()\n            .to_spark(flatten=False)\n        )\n    )\n
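Example (a sketch only: Hail must already be initialised on the cluster, and the gnomAD table path, chain file and population codes below are placeholders):

    import hail as hl
    from otg.dataset.variant_annotation import VariantAnnotation

    hl.init()  # from_gnomad reads a Hail Table, so a Hail context is required
    variant_annotation = VariantAnnotation.from_gnomad(
        gnomad_file="path/to/gnomad.genomes.v3.1.1.sites.ht",
        grch38_to_grch37_chain="path/to/grch38_to_grch37.over.chain.gz",
        populations=["afr", "amr", "eas", "fin", "nfe"],  # labels must match gnomAD's freq index keys
    )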
"},{"location":"components/dataset/variant_annotation/#otg.dataset.variant_annotation.VariantAnnotation.from_parquet","title":"from_parquet(session, path) classmethod","text":"

Initialise VariantAnnotation from parquet file.

Parameters:

  • session (Session): ETL session. Required.
  • path (str): Path to parquet file. Required.

Returns:

  • VariantAnnotation (VariantAnnotation): VariantAnnotation dataset

Source code in src/otg/dataset/variant_annotation.py
@classmethod\ndef from_parquet(\n    cls: type[VariantAnnotation], session: Session, path: str\n) -> VariantAnnotation:\n\"\"\"Initialise VariantAnnotation from parquet file.\n\n    Args:\n        session (Session): ETL session\n        path (str): Path to parquet file\n\n    Returns:\n        VariantAnnotation: VariantAnnotation dataset\n    \"\"\"\n    df = session.read_parquet(path=path, schema=cls._schema)\n    return cls(_df=df, _schema=cls._schema)\n
"},{"location":"components/dataset/variant_annotation/#otg.dataset.variant_annotation.VariantAnnotation.get_distance_to_tss","title":"get_distance_to_tss(filter_by, max_distance=500000)","text":"

Extracts variant to gene assignments for variants falling within a window of a gene's TSS.

Parameters:

  • filter_by (GeneIndex): A gene index to filter by. Required.
  • max_distance (int): The maximum distance from the TSS to consider. Defaults to 500_000.

Returns:

  • V2G (V2G): variant to gene assignments with their distance to the TSS

Source code in src/otg/dataset/variant_annotation.py
def get_distance_to_tss(\n    self: VariantAnnotation,\n    filter_by: GeneIndex,\n    max_distance: int = 500_000,\n) -> V2G:\n\"\"\"Extracts variant to gene assignments for variants falling within a window of a gene's TSS.\n\n    Args:\n        filter_by (GeneIndex): A gene index to filter by.\n        max_distance (int): The maximum distance from the TSS to consider. Defaults to 500_000.\n\n    Returns:\n        V2G: variant to gene assignments with their distance to the TSS\n    \"\"\"\n    return V2G(\n        _df=self.df.alias(\"variant\")\n        .join(\n            f.broadcast(filter_by.locations_lut()).alias(\"gene\"),\n            on=[\n                f.col(\"variant.chromosome\") == f.col(\"gene.chromosome\"),\n                f.abs(f.col(\"variant.position\") - f.col(\"gene.tss\"))\n                <= max_distance,\n            ],\n            how=\"inner\",\n        )\n        .withColumn(\n            \"inverse_distance\",\n            max_distance - f.abs(f.col(\"variant.position\") - f.col(\"gene.tss\")),\n        )\n        .transform(lambda df: normalise_column(df, \"inverse_distance\", \"score\"))\n        .select(\n            \"variantId\",\n            f.col(\"variant.chromosome\").alias(\"chromosome\"),\n            \"position\",\n            \"geneId\",\n            \"score\",\n            f.lit(\"distance\").alias(\"datatypeId\"),\n            f.lit(\"canonical_tss\").alias(\"datasourceId\"),\n        )\n    )\n
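Example (sketch; GeneIndex is assumed to expose the same from_parquet constructor as the other datasets, the Session module path is assumed, and all paths are placeholders):

    from otg.common.session import Session          # assumed module path
    from otg.dataset.gene_index import GeneIndex    # assumed module path
    from otg.dataset.variant_annotation import VariantAnnotation

    session = Session()
    genes = GeneIndex.from_parquet(session, "gs://bucket/gene_index")
    va = VariantAnnotation.from_parquet(session, "gs://bucket/variant_annotation")
    # Variants within 500 kb of a canonical TSS, scored by normalised inverse distance.
    distance_v2g = va.get_distance_to_tss(filter_by=genes, max_distance=500_000)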
"},{"location":"components/dataset/variant_annotation/#otg.dataset.variant_annotation.VariantAnnotation.get_most_severe_vep_v2g","title":"get_most_severe_vep_v2g(vep_consequences, filter_by)","text":"

Creates a dataset with variant to gene assignments based on VEP's predicted consequence on the transcript.

Optionally, the transcript consequences can be reduced to the universe of a gene index.

Parameters:

  • vep_consequences (DataFrame): A dataframe of VEP consequences. Required.
  • filter_by (GeneIndex): A gene index to filter by. Required.

Returns:

  • V2G (V2G): High and medium severity variant to gene assignments

Source code in src/otg/dataset/variant_annotation.py
def get_most_severe_vep_v2g(\n    self: VariantAnnotation,\n    vep_consequences: DataFrame,\n    filter_by: GeneIndex,\n) -> V2G:\n\"\"\"Creates a dataset with variant to gene assignments based on VEP's predicted consequence on the transcript.\n\n    Optionally the trancript consequences can be reduced to the universe of a gene index.\n\n    Args:\n        vep_consequences (DataFrame): A dataframe of VEP consequences\n        filter_by (GeneIndex): A gene index to filter by. Defaults to None.\n\n    Returns:\n        V2G: High and medium severity variant to gene assignments\n    \"\"\"\n    vep_lut = vep_consequences.select(\n        f.element_at(f.split(\"Accession\", r\"/\"), -1).alias(\n            \"variantFunctionalConsequenceId\"\n        ),\n        f.col(\"Term\").alias(\"label\"),\n        f.col(\"v2g_score\").cast(\"double\").alias(\"score\"),\n    )\n\n    return V2G(\n        _df=self.get_transcript_consequence_df(filter_by)\n        .select(\n            \"variantId\",\n            \"chromosome\",\n            \"position\",\n            f.col(\"transcriptConsequence.geneId\").alias(\"geneId\"),\n            f.explode(\"transcriptConsequence.consequenceTerms\").alias(\"label\"),\n            f.lit(\"vep\").alias(\"datatypeId\"),\n            f.lit(\"variantConsequence\").alias(\"datasourceId\"),\n        )\n        # A variant can have multiple predicted consequences on a transcript, the most severe one is selected\n        .join(\n            f.broadcast(vep_lut),\n            on=\"label\",\n            how=\"inner\",\n        )\n        .filter(f.col(\"score\") != 0)\n        .transform(\n            lambda df: get_record_with_maximum_value(\n                df, [\"variantId\", \"geneId\"], \"score\"\n            )\n        )\n    )\n
"},{"location":"components/dataset/variant_annotation/#otg.dataset.variant_annotation.VariantAnnotation.get_plof_v2g","title":"get_plof_v2g(filter_by)","text":"

Creates a dataset with variant to gene assignments with a flag indicating if the variant is predicted to be a loss-of-function variant by the LOFTEE algorithm.

Optionally, the transcript consequences can be reduced to the universe of a gene index.

Parameters:

  • filter_by (GeneIndex): A gene index to filter by. Required.

Returns:

  • V2G (V2G): variant to gene assignments from the LOFTEE algorithm

Source code in src/otg/dataset/variant_annotation.py
def get_plof_v2g(self: VariantAnnotation, filter_by: GeneIndex) -> V2G:\n\"\"\"Creates a dataset with variant to gene assignments with a flag indicating if the variant is predicted to be a loss-of-function variant by the LOFTEE algorithm.\n\n    Optionally the trancript consequences can be reduced to the universe of a gene index.\n\n    Args:\n        filter_by (GeneIndex): A gene index to filter by.\n\n    Returns:\n        V2G: variant to gene assignments from the LOFTEE algorithm\n    \"\"\"\n    return V2G(\n        _df=self.get_transcript_consequence_df(filter_by)\n        .filter(f.col(\"transcriptConsequence.lof\").isNotNull())\n        .withColumn(\n            \"isHighQualityPlof\",\n            f.when(f.col(\"transcriptConsequence.lof\") == \"HC\", True).when(\n                f.col(\"transcriptConsequence.lof\") == \"LC\", False\n            ),\n        )\n        .withColumn(\n            \"score\",\n            f.when(f.col(\"isHighQualityPlof\"), 1.0).when(\n                ~f.col(\"isHighQualityPlof\"), 0\n            ),\n        )\n        .select(\n            \"variantId\",\n            \"chromosome\",\n            \"position\",\n            \"geneId\",\n            \"isHighQualityPlof\",\n            f.col(\"score\"),\n            f.lit(\"vep\").alias(\"datatypeId\"),\n            f.lit(\"loftee\").alias(\"datasourceId\"),\n        )\n    )\n
"},{"location":"components/dataset/variant_annotation/#otg.dataset.variant_annotation.VariantAnnotation.get_polyphen_v2g","title":"get_polyphen_v2g(filter_by=None)","text":"

Creates a dataset with variant to gene assignments with PolyPhen's predicted score on the transcript.

PolyPhen informs about the probability that a substitution is damaging. Optionally, the transcript consequences can be reduced to the universe of a gene index.

Parameters:

  • filter_by (GeneIndex): A gene index to filter by. Defaults to None.

Returns:

  • V2G (V2G): variant to gene assignments with their polyphen scores

Source code in src/otg/dataset/variant_annotation.py
def get_polyphen_v2g(\n    self: VariantAnnotation, filter_by: Optional[GeneIndex] = None\n) -> V2G:\n\"\"\"Creates a dataset with variant to gene assignments with a PolyPhen's predicted score on the transcript.\n\n    Polyphen informs about the probability that a substitution is damaging. Optionally the trancript consequences can be reduced to the universe of a gene index.\n\n    Args:\n        filter_by (GeneIndex): A gene index to filter by. Defaults to None.\n\n    Returns:\n        V2G: variant to gene assignments with their polyphen scores\n    \"\"\"\n    return V2G(\n        _df=self.get_transcript_consequence_df(filter_by)\n        .filter(f.col(\"transcriptConsequence.polyphenScore\").isNotNull())\n        .select(\n            \"variantId\",\n            \"chromosome\",\n            \"position\",\n            \"geneId\",\n            f.col(\"transcriptConsequence.polyphenScore\").alias(\"score\"),\n            f.col(\"transcriptConsequence.polyphenPrediction\").alias(\"label\"),\n            f.lit(\"vep\").alias(\"datatypeId\"),\n            f.lit(\"polyphen\").alias(\"datasourceId\"),\n        )\n    )\n
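Example (sketch reusing the variant annotation and gene index objects from the distance-to-TSS example above):

    # PolyPhen-based V2G; pass filter_by=None to keep transcript consequences for all genes.
    polyphen_v2g = va.get_polyphen_v2g(filter_by=genes)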
"},{"location":"components/dataset/variant_annotation/#otg.dataset.variant_annotation.VariantAnnotation.get_sift_v2g","title":"get_sift_v2g(filter_by)","text":"

Creates a dataset with variant to gene assignments with SIFT's predicted score on the transcript.

SIFT informs about the probability that a substitution is tolerated, so scores nearer zero are more likely to be deleterious. Optionally, the transcript consequences can be reduced to the universe of a gene index.

Parameters:

  • filter_by (GeneIndex): A gene index to filter by. Required.

Returns:

  • V2G (V2G): variant to gene assignments with their SIFT scores

Source code in src/otg/dataset/variant_annotation.py
def get_sift_v2g(self: VariantAnnotation, filter_by: GeneIndex) -> V2G:\n\"\"\"Creates a dataset with variant to gene assignments with a SIFT's predicted score on the transcript.\n\n    SIFT informs about the probability that a substitution is tolerated so scores nearer zero are more likely to be deleterious.\n    Optionally the trancript consequences can be reduced to the universe of a gene index.\n\n    Args:\n        filter_by (GeneIndex): A gene index to filter by.\n\n    Returns:\n        V2G: variant to gene assignments with their SIFT scores\n    \"\"\"\n    return V2G(\n        _df=self.get_transcript_consequence_df(filter_by)\n        .filter(f.col(\"transcriptConsequence.siftScore\").isNotNull())\n        .select(\n            \"variantId\",\n            \"chromosome\",\n            \"position\",\n            \"geneId\",\n            f.expr(\"1 - transcriptConsequence.siftScore\").alias(\"score\"),\n            f.col(\"transcriptConsequence.siftPrediction\").alias(\"label\"),\n            f.lit(\"vep\").alias(\"datatypeId\"),\n            f.lit(\"sift\").alias(\"datasourceId\"),\n        )\n    )\n
"},{"location":"components/dataset/variant_annotation/#otg.dataset.variant_annotation.VariantAnnotation.get_transcript_consequence_df","title":"get_transcript_consequence_df(filter_by=None)","text":"

Dataframe of exploded transcript consequences.

Optionally, the transcript consequences can be reduced to the universe of a gene index.

Parameters:

  • filter_by (GeneIndex): A gene index. Defaults to None.

Returns:

  • DataFrame (DataFrame): A dataframe exploded by transcript consequences with the columns variantId, chromosome, transcriptConsequence

Source code in src/otg/dataset/variant_annotation.py
def get_transcript_consequence_df(\n    self: VariantAnnotation, filter_by: Optional[GeneIndex] = None\n) -> DataFrame:\n\"\"\"Dataframe of exploded transcript consequences.\n\n    Optionally the trancript consequences can be reduced to the universe of a gene index.\n\n    Args:\n        filter_by (GeneIndex): A gene index. Defaults to None.\n\n    Returns:\n        DataFrame: A dataframe exploded by transcript consequences with the columns variantId, chromosome, transcriptConsequence\n    \"\"\"\n    # exploding the array removes records without VEP annotation\n    transript_consequences = self.df.withColumn(\n        \"transcriptConsequence\", f.explode(\"vep.transcriptConsequences\")\n    ).select(\n        \"variantId\",\n        \"chromosome\",\n        \"position\",\n        \"transcriptConsequence\",\n        f.col(\"transcriptConsequence.geneId\").alias(\"geneId\"),\n    )\n    if filter_by:\n        transript_consequences = transript_consequences.join(\n            f.broadcast(filter_by.df),\n            on=[\"chromosome\", \"geneId\"],\n        )\n    return transript_consequences.persist()\n
"},{"location":"components/dataset/variant_annotation/#otg.dataset.variant_annotation.VariantAnnotation.max_maf","title":"max_maf()","text":"

Maximum minor allele frequency across all populations.

Returns:

  • Column (Column): Maximum minor allele frequency across all populations.

Source code in src/otg/dataset/variant_annotation.py
def max_maf(self: VariantAnnotation) -> Column:\n\"\"\"Maximum minor allele frequency accross all populations.\n\n    Returns:\n        Column: Maximum minor allele frequency accross all populations.\n    \"\"\"\n    return f.array_max(\n        f.transform(\n            self.df.alleleFrequencies,\n            lambda af: f.when(\n                af.alleleFrequency > 0.5, 1 - af.alleleFrequency\n            ).otherwise(af.alleleFrequency),\n        )\n    )\n
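Example (sketch; max_maf returns a Column expression, so it is attached with withColumn rather than called as a transformation; the output column name is illustrative):

    # Annotate each variant with its maximum minor allele frequency across populations.
    va_with_max_maf = va.df.withColumn("maxMinorAlleleFrequency", va.max_maf())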
"},{"location":"components/dataset/variant_annotation/#otg.dataset.variant_annotation.VariantAnnotation.persist","title":"persist()","text":"

Persist DataFrame included in the Dataset.

Source code in src/otg/dataset/variant_annotation.py
def persist(self: VariantAnnotation) -> VariantAnnotation:\n\"\"\"Persist DataFrame included in the Dataset.\"\"\"\n    self.df = self._df.persist()\n    return self\n
"},{"location":"components/dataset/variant_annotation/#schema","title":"Schema","text":"
root\n |-- variantId: string (nullable = false)\n |-- chromosome: string (nullable = false)\n |-- position: integer (nullable = false)\n |-- gnomad3VariantId: string (nullable = false)\n |-- referenceAllele: string (nullable = false)\n |-- alternateAllele: string (nullable = false)\n |-- chromosomeB37: string (nullable = true)\n |-- positionB37: integer (nullable = true)\n |-- alleleType: string (nullable = true)\n |-- rsIds: array (nullable = true)\n |    |-- element: string (containsNull = true)\n |-- alleleFrequencies: array (nullable = false)\n |    |-- element: struct (containsNull = true)\n |    |    |-- populationName: string (nullable = true)\n |    |    |-- alleleFrequency: double (nullable = true)\n |-- cadd: struct (nullable = true)\n |    |-- phred: float (nullable = true)\n |    |-- raw: float (nullable = true)\n |-- vep: struct (nullable = false)\n |    |-- mostSevereConsequence: string (nullable = true)\n |    |-- transcriptConsequences: array (nullable = true)\n |    |    |-- element: struct (containsNull = true)\n |    |    |    |-- aminoAcids: string (nullable = true)\n |    |    |    |-- consequenceTerms: array (nullable = true)\n |    |    |    |    |-- element: string (containsNull = true)\n |    |    |    |-- geneId: string (nullable = true)\n |    |    |    |-- lof: string (nullable = true)\n |    |    |    |-- polyphenScore: double (nullable = true)\n |    |    |    |-- polyphenPrediction: string (nullable = true)\n |    |    |    |-- siftScore: double (nullable = true)\n |    |    |    |-- siftPrediction: string (nullable = true)\n
"},{"location":"components/dataset/variant_index/","title":"Variant index","text":"

Bases: Dataset

Variant index dataset.

The variant index dataset is the result of intersecting the variant annotation (gnomAD) dataset with the variants for which V2D information is available.

Source code in src/otg/dataset/variant_index.py
@dataclass\nclass VariantIndex(Dataset):\n\"\"\"Variant index dataset.\n\n    Variant index dataset is the result of intersecting the variant annotation (gnomad) dataset with the variants with V2D available information.\n    \"\"\"\n\n    _schema: StructType = parse_spark_schema(\"variant_index.json\")\n\n    @classmethod\n    def from_parquet(\n        cls: type[VariantIndex], session: Session, path: str\n    ) -> VariantIndex:\n\"\"\"Initialise VariantIndex from parquet file.\n\n        Args:\n            session (Session): ETL session\n            path (str): Path to parquet file\n\n        Returns:\n            VariantIndex: VariantIndex dataset\n        \"\"\"\n        df = session.read_parquet(path=path, schema=cls._schema)\n        return cls(_df=df, _schema=cls._schema)\n\n    @classmethod\n    def from_variant_annotation(\n        cls: type[VariantIndex],\n        variant_annotation: VariantAnnotation,\n    ) -> VariantIndex:\n\"\"\"Initialise VariantIndex from pre-existing variant annotation dataset.\"\"\"\n        unchanged_cols = [\n            \"variantId\",\n            \"chromosome\",\n            \"position\",\n            \"referenceAllele\",\n            \"alternateAllele\",\n            \"chromosomeB37\",\n            \"positionB37\",\n            \"alleleType\",\n            \"alleleFrequencies\",\n            \"cadd\",\n        ]\n        vi = cls(\n            _df=variant_annotation.df.select(\n                *unchanged_cols,\n                f.col(\"vep.mostSevereConsequence\").alias(\"mostSevereConsequence\"),\n                # filters/rsid are arrays that can be empty, in this case we convert them to null\n                nullify_empty_array(f.col(\"rsIds\")).alias(\"rsIds\"),\n            ),\n        )\n        return VariantIndex(\n            _df=vi.df.repartition(\n                400,\n                \"chromosome\",\n            ).sortWithinPartitions(\"chromosome\", \"position\")\n        )\n\n    def persist(self: VariantIndex) -> VariantIndex:\n\"\"\"Persist DataFrame included in the Dataset.\"\"\"\n        self.df = self._df.persist()\n        return self\n
"},{"location":"components/dataset/variant_index/#otg.dataset.variant_index.VariantIndex.from_parquet","title":"from_parquet(session, path) classmethod","text":"

Initialise VariantIndex from parquet file.

Parameters:

  • session (Session): ETL session. Required.
  • path (str): Path to parquet file. Required.

Returns:

  • VariantIndex (VariantIndex): VariantIndex dataset

Source code in src/otg/dataset/variant_index.py
@classmethod\ndef from_parquet(\n    cls: type[VariantIndex], session: Session, path: str\n) -> VariantIndex:\n\"\"\"Initialise VariantIndex from parquet file.\n\n    Args:\n        session (Session): ETL session\n        path (str): Path to parquet file\n\n    Returns:\n        VariantIndex: VariantIndex dataset\n    \"\"\"\n    df = session.read_parquet(path=path, schema=cls._schema)\n    return cls(_df=df, _schema=cls._schema)\n
"},{"location":"components/dataset/variant_index/#otg.dataset.variant_index.VariantIndex.from_variant_annotation","title":"from_variant_annotation(variant_annotation) classmethod","text":"

Initialise VariantIndex from pre-existing variant annotation dataset.

Source code in src/otg/dataset/variant_index.py
@classmethod\ndef from_variant_annotation(\n    cls: type[VariantIndex],\n    variant_annotation: VariantAnnotation,\n) -> VariantIndex:\n\"\"\"Initialise VariantIndex from pre-existing variant annotation dataset.\"\"\"\n    unchanged_cols = [\n        \"variantId\",\n        \"chromosome\",\n        \"position\",\n        \"referenceAllele\",\n        \"alternateAllele\",\n        \"chromosomeB37\",\n        \"positionB37\",\n        \"alleleType\",\n        \"alleleFrequencies\",\n        \"cadd\",\n    ]\n    vi = cls(\n        _df=variant_annotation.df.select(\n            *unchanged_cols,\n            f.col(\"vep.mostSevereConsequence\").alias(\"mostSevereConsequence\"),\n            # filters/rsid are arrays that can be empty, in this case we convert them to null\n            nullify_empty_array(f.col(\"rsIds\")).alias(\"rsIds\"),\n        ),\n    )\n    return VariantIndex(\n        _df=vi.df.repartition(\n            400,\n            \"chromosome\",\n        ).sortWithinPartitions(\"chromosome\", \"position\")\n    )\n
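Example (sketch; the variant annotation object is assumed to come from one of the constructors shown above):

    from otg.dataset.variant_index import VariantIndex

    # Build the variant index from a pre-existing variant annotation dataset.
    variant_index = VariantIndex.from_variant_annotation(va)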
"},{"location":"components/dataset/variant_index/#otg.dataset.variant_index.VariantIndex.persist","title":"persist()","text":"

Persist DataFrame included in the Dataset.

Source code in src/otg/dataset/variant_index.py
def persist(self: VariantIndex) -> VariantIndex:\n\"\"\"Persist DataFrame included in the Dataset.\"\"\"\n    self.df = self._df.persist()\n    return self\n
"},{"location":"components/dataset/variant_index/#schema","title":"Schema","text":"
root\n |-- variantId: string (nullable = false)\n |-- chromosome: string (nullable = false)\n |-- position: integer (nullable = false)\n |-- referenceAllele: string (nullable = false)\n |-- alternateAllele: string (nullable = false)\n |-- chromosomeB37: string (nullable = true)\n |-- positionB37: integer (nullable = true)\n |-- alleleType: string (nullable = false)\n |-- alleleFrequencies: array (nullable = false)\n |    |-- element: struct (containsNull = true)\n |    |    |-- populationName: string (nullable = true)\n |    |    |-- alleleFrequency: double (nullable = true)\n |-- cadd: struct (nullable = true)\n |    |-- phred: float (nullable = true)\n |    |-- raw: float (nullable = true)\n |-- mostSevereConsequence: string (nullable = true)\n |-- rsIds: array (nullable = true)\n |    |-- element: string (containsNull = true)\n
"},{"location":"components/dataset/variant_to_gene/","title":"Variant to gene","text":"

Bases: Dataset

Variant-to-gene (V2G) evidence dataset.

Variant-to-gene (V2G) evidence is understood as any piece of evidence that supports the association of a variant with a likely causal gene. The evidence can sometimes be context-specific and refer to specific biofeatures (e.g. cell types).

Source code in src/otg/dataset/v2g.py
@dataclass\nclass V2G(Dataset):\n\"\"\"Variant-to-gene (V2G) evidence dataset.\n\n    A variant-to-gene (V2G) evidence is understood as any piece of evidence that supports the association of a variant with a likely causal gene. The evidence can sometimes be context-specific and refer to specific `biofeatures` (e.g. cell types)\n    \"\"\"\n\n    _schema: StructType = parse_spark_schema(\"v2g.json\")\n\n    @classmethod\n    def from_parquet(cls: type[V2G], session: Session, path: str) -> V2G:\n\"\"\"Initialise V2G from parquet file.\n\n        Args:\n            session (Session): ETL session\n            path (str): Path to parquet file\n\n        Returns:\n            V2G: V2G dataset\n        \"\"\"\n        df = session.read_parquet(path=path, schema=cls._schema)\n        return cls(_df=df, _schema=cls._schema)\n\n    def filter_by_genes(self: V2G, genes: GeneIndex) -> V2G:\n\"\"\"Filter by V2G dataset by genes.\n\n        Args:\n            genes (GeneIndex): Gene index dataset to filter by\n\n        Returns:\n            V2G: V2G dataset filtered by genes\n        \"\"\"\n        self.df = self._df.join(genes.df.select(\"geneId\"), on=\"geneId\", how=\"inner\")\n        return self\n
"},{"location":"components/dataset/variant_to_gene/#otg.dataset.v2g.V2G.filter_by_genes","title":"filter_by_genes(genes)","text":"

Filter the V2G dataset by genes.

Parameters:

  • genes (GeneIndex): Gene index dataset to filter by. Required.

Returns:

  • V2G (V2G): V2G dataset filtered by genes

Source code in src/otg/dataset/v2g.py
def filter_by_genes(self: V2G, genes: GeneIndex) -> V2G:\n\"\"\"Filter by V2G dataset by genes.\n\n    Args:\n        genes (GeneIndex): Gene index dataset to filter by\n\n    Returns:\n        V2G: V2G dataset filtered by genes\n    \"\"\"\n    self.df = self._df.join(genes.df.select(\"geneId\"), on=\"geneId\", how=\"inner\")\n    return self\n
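Example (sketch reusing the distance-based V2G and gene index objects from the earlier examples):

    # Restrict the V2G evidence to genes present in the gene index.
    filtered_v2g = distance_v2g.filter_by_genes(genes)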
"},{"location":"components/dataset/variant_to_gene/#otg.dataset.v2g.V2G.from_parquet","title":"from_parquet(session, path) classmethod","text":"

Initialise V2G from parquet file.

Parameters:

  • session (Session): ETL session. Required.
  • path (str): Path to parquet file. Required.

Returns:

  • V2G (V2G): V2G dataset

Source code in src/otg/dataset/v2g.py
@classmethod\ndef from_parquet(cls: type[V2G], session: Session, path: str) -> V2G:\n\"\"\"Initialise V2G from parquet file.\n\n    Args:\n        session (Session): ETL session\n        path (str): Path to parquet file\n\n    Returns:\n        V2G: V2G dataset\n    \"\"\"\n    df = session.read_parquet(path=path, schema=cls._schema)\n    return cls(_df=df, _schema=cls._schema)\n
"},{"location":"components/dataset/variant_to_gene/#schema","title":"Schema","text":"
root\n |-- geneId: string (nullable = false)\n |-- variantId: string (nullable = false)\n |-- distance: long (nullable = true)\n |-- chromosome: string (nullable = false)\n |-- datatypeId: string (nullable = false)\n |-- datasourceId: string (nullable = false)\n |-- score: double (nullable = true)\n |-- resourceScore: double (nullable = true)\n |-- pmid: string (nullable = true)\n |-- biofeature: string (nullable = true)\n |-- position: integer (nullable = false)\n |-- label: string (nullable = true)\n |-- variantFunctionalConsequenceId: string (nullable = true)\n |-- isHighQualityPlof: boolean (nullable = true)\n
"},{"location":"components/dataset/study_index/_study_index/","title":"Study index","text":"

Bases: Dataset

Study index dataset.

A study index dataset captures all the metadata for all studies, including GWAS and molecular QTL studies.

Source code in src/otg/dataset/study_index.py
@dataclass\nclass StudyIndex(Dataset):\n\"\"\"Study index dataset.\n\n    A study index dataset captures all the metadata for all studies including GWAS and Molecular QTL.\n    \"\"\"\n\n    _schema: StructType = parse_spark_schema(\"studies.json\")\n\n    @classmethod\n    def from_parquet(cls: type[StudyIndex], session: Session, path: str) -> StudyIndex:\n\"\"\"Initialise StudyIndex from parquet file.\n\n        Args:\n            session (Session): ETL session\n            path (str): Path to parquet file\n\n        Returns:\n            StudyIndex: Study index dataset\n        \"\"\"\n        df = session.read_parquet(path=path, schema=cls._schema)\n        return cls(_df=df, _schema=cls._schema)\n\n    def study_type_lut(self: StudyIndex) -> DataFrame:\n\"\"\"Return a lookup table of study type.\n\n        Returns:\n            DataFrame: A dataframe containing `studyId` and `studyType` columns.\n        \"\"\"\n        return self.df.select(\"studyId\", \"studyType\")\n
"},{"location":"components/dataset/study_index/_study_index/#otg.dataset.study_index.StudyIndex.from_parquet","title":"from_parquet(session, path) classmethod","text":"

Initialise StudyIndex from parquet file.

Parameters:

session (Session): ETL session. Required.

path (str): Path to parquet file. Required.

Returns:

StudyIndex: Study index dataset.

Source code in src/otg/dataset/study_index.py
@classmethod\ndef from_parquet(cls: type[StudyIndex], session: Session, path: str) -> StudyIndex:\n\"\"\"Initialise StudyIndex from parquet file.\n\n    Args:\n        session (Session): ETL session\n        path (str): Path to parquet file\n\n    Returns:\n        StudyIndex: Study index dataset\n    \"\"\"\n    df = session.read_parquet(path=path, schema=cls._schema)\n    return cls(_df=df, _schema=cls._schema)\n
"},{"location":"components/dataset/study_index/_study_index/#otg.dataset.study_index.StudyIndex.study_type_lut","title":"study_type_lut()","text":"

Return a lookup table of study type.

Returns:

DataFrame: A dataframe containing studyId and studyType columns.

Source code in src/otg/dataset/study_index.py
def study_type_lut(self: StudyIndex) -> DataFrame:\n\"\"\"Return a lookup table of study type.\n\n    Returns:\n        DataFrame: A dataframe containing `studyId` and `studyType` columns.\n    \"\"\"\n    return self.df.select(\"studyId\", \"studyType\")\n
"},{"location":"components/dataset/study_index/_study_index/#schema","title":"Schema","text":"
root\n |-- studyId: string (nullable = false)\n |-- projectId: string (nullable = false)\n |-- studyType: string (nullable = false)\n |-- traitFromSource: string (nullable = false)\n |-- traitFromSourceMappedIds: array (nullable = true)\n |    |-- element: string (containsNull = true)\n |-- pubmedId: string (nullable = true)\n |-- publicationTitle: string (nullable = true)\n |-- publicationFirstAuthor: string (nullable = true)\n |-- publicationDate: string (nullable = true)\n |-- publicationJournal: string (nullable = true)\n |-- backgroundTraitFromSourceMappedIds: array (nullable = true)\n |    |-- element: string (containsNull = true)\n |-- initialSampleSize: string (nullable = true)\n |-- nCases: long (nullable = true)\n |-- nControls: long (nullable = true)\n |-- nSamples: long (nullable = true)\n |-- discoverySamples: array (nullable = true)\n |    |-- element: struct (containsNull = false)\n |    |    |-- sampleSize: string (nullable = true)\n |    |    |-- ancestry: string (nullable = true)\n |-- replicationSamples: array (nullable = true)\n |    |-- element: struct (containsNull = false)\n |    |    |-- sampleSize: string (nullable = true)\n |    |    |-- ancestry: string (nullable = true)\n |-- summarystatsLocation: string (nullable = true)\n |-- hasSumstats: boolean (nullable = true)\n
"},{"location":"components/dataset/study_index/study_index_finngen/","title":"Study index finngen","text":"

Bases: StudyIndex

Study index dataset from FinnGen.

The following information is aggregated/extracted:

  • Study ID in the special format (FINNGEN_R9_*)
  • Trait name (for example, Amoebiasis)
  • Number of cases and controls
  • Link to the summary statistics location

Some fields are also populated as constants, such as study type and the initial sample size.

Source code in src/otg/dataset/study_index.py
@dataclass\nclass StudyIndexFinnGen(StudyIndex):\n\"\"\"Study index dataset from FinnGen.\n\n    The following information is aggregated/extracted:\n\n    - Study ID in the special format (FINNGEN_R9_*)\n    - Trait name (for example, Amoebiasis)\n    - Number of cases and controls\n    - Link to the summary statistics location\n\n    Some fields are also populated as constants, such as study type and the initial sample size.\n    \"\"\"\n\n    @classmethod\n    def from_source(\n        cls: type[StudyIndexFinnGen],\n        finngen_studies: DataFrame,\n        finngen_release_prefix: str,\n        finngen_sumstat_url_prefix: str,\n        finngen_sumstat_url_suffix: str,\n    ) -> StudyIndexFinnGen:\n\"\"\"This function ingests study level metadata from FinnGen.\n\n        Args:\n            finngen_studies (DataFrame): FinnGen raw study table\n            finngen_release_prefix (str): Release prefix pattern.\n            finngen_sumstat_url_prefix (str): URL prefix for summary statistics location.\n            finngen_sumstat_url_suffix (str): URL prefix suffix for summary statistics location.\n\n        Returns:\n            StudyIndexFinnGen: Parsed and annotated FinnGen study table.\n        \"\"\"\n        return cls(\n            _df=(\n                # Read FinnGen raw data.\n                finngen_studies.select(\n                    # Select the desired columns.\n                    f.concat(\n                        f.lit(finngen_release_prefix + \"_\"), f.col(\"phenocode\")\n                    ).alias(\"studyId\"),\n                    f.col(\"phenostring\").alias(\"traitFromSource\"),\n                    f.col(\"num_cases\").alias(\"nCases\"),\n                    f.col(\"num_controls\").alias(\"nControls\"),\n                    # Set constant value columns.\n                    f.lit(finngen_release_prefix).alias(\"projectId\"),\n                    f.lit(\"gwas\").alias(\"studyType\"),\n                    f.lit(True).alias(\"hasSumstats\"),\n                    f.lit(\"377,277 (210,870 females and 166,407 males)\").alias(\n                        \"initialSampleSize\"\n                    ),\n                )\n                .withColumn(\"nSamples\", f.col(\"nCases\") + f.col(\"nControls\"))\n                .withColumn(\n                    \"summarystatsLocation\",\n                    f.concat(\n                        f.lit(finngen_sumstat_url_prefix),\n                        f.col(\"studyId\"),\n                        f.lit(finngen_sumstat_url_suffix),\n                    ),\n                )\n            )\n        )\n
"},{"location":"components/dataset/study_index/study_index_finngen/#otg.dataset.study_index.StudyIndexFinnGen.from_source","title":"from_source(finngen_studies, finngen_release_prefix, finngen_sumstat_url_prefix, finngen_sumstat_url_suffix) classmethod","text":"

This function ingests study level metadata from FinnGen.

Parameters:

finngen_studies (DataFrame): FinnGen raw study table. Required.

finngen_release_prefix (str): Release prefix pattern. Required.

finngen_sumstat_url_prefix (str): URL prefix for the summary statistics location. Required.

finngen_sumstat_url_suffix (str): URL suffix for the summary statistics location. Required.

Returns:

StudyIndexFinnGen: Parsed and annotated FinnGen study table.

Source code in src/otg/dataset/study_index.py
@classmethod\ndef from_source(\n    cls: type[StudyIndexFinnGen],\n    finngen_studies: DataFrame,\n    finngen_release_prefix: str,\n    finngen_sumstat_url_prefix: str,\n    finngen_sumstat_url_suffix: str,\n) -> StudyIndexFinnGen:\n\"\"\"This function ingests study level metadata from FinnGen.\n\n    Args:\n        finngen_studies (DataFrame): FinnGen raw study table\n        finngen_release_prefix (str): Release prefix pattern.\n        finngen_sumstat_url_prefix (str): URL prefix for summary statistics location.\n        finngen_sumstat_url_suffix (str): URL prefix suffix for summary statistics location.\n\n    Returns:\n        StudyIndexFinnGen: Parsed and annotated FinnGen study table.\n    \"\"\"\n    return cls(\n        _df=(\n            # Read FinnGen raw data.\n            finngen_studies.select(\n                # Select the desired columns.\n                f.concat(\n                    f.lit(finngen_release_prefix + \"_\"), f.col(\"phenocode\")\n                ).alias(\"studyId\"),\n                f.col(\"phenostring\").alias(\"traitFromSource\"),\n                f.col(\"num_cases\").alias(\"nCases\"),\n                f.col(\"num_controls\").alias(\"nControls\"),\n                # Set constant value columns.\n                f.lit(finngen_release_prefix).alias(\"projectId\"),\n                f.lit(\"gwas\").alias(\"studyType\"),\n                f.lit(True).alias(\"hasSumstats\"),\n                f.lit(\"377,277 (210,870 females and 166,407 males)\").alias(\n                    \"initialSampleSize\"\n                ),\n            )\n            .withColumn(\"nSamples\", f.col(\"nCases\") + f.col(\"nControls\"))\n            .withColumn(\n                \"summarystatsLocation\",\n                f.concat(\n                    f.lit(finngen_sumstat_url_prefix),\n                    f.col(\"studyId\"),\n                    f.lit(finngen_sumstat_url_suffix),\n                ),\n            )\n        )\n    )\n
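A minimal usage sketch: the input path, URL parts and phenocode below are placeholders rather than the pipeline's configured values, and reading the raw table with spark.read.json is an assumption about the source format.

```python
from otg.dataset.study_index import StudyIndexFinnGen

finngen_studies = spark.read.json("finngen_r9_endpoints.json")  # placeholder path/format

finngen_study_index = StudyIndexFinnGen.from_source(
    finngen_studies=finngen_studies,
    finngen_release_prefix="FINNGEN_R9",
    finngen_sumstat_url_prefix="https://example.org/sumstats/",  # placeholder
    finngen_sumstat_url_suffix=".gz",                            # placeholder
)

# A phenocode such as "AB1_AMOEBIASIS" would yield studyId "FINNGEN_R9_AB1_AMOEBIASIS" and
# summarystatsLocation "https://example.org/sumstats/FINNGEN_R9_AB1_AMOEBIASIS.gz".
finngen_study_index.df.select("studyId", "summarystatsLocation").show(truncate=False)
```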
"},{"location":"components/dataset/study_index/study_index_finngen/#schema","title":"Schema","text":"
root\n |-- studyId: string (nullable = false)\n |-- projectId: string (nullable = false)\n |-- studyType: string (nullable = false)\n |-- traitFromSource: string (nullable = false)\n |-- traitFromSourceMappedIds: array (nullable = true)\n |    |-- element: string (containsNull = true)\n |-- pubmedId: string (nullable = true)\n |-- publicationTitle: string (nullable = true)\n |-- publicationFirstAuthor: string (nullable = true)\n |-- publicationDate: string (nullable = true)\n |-- publicationJournal: string (nullable = true)\n |-- backgroundTraitFromSourceMappedIds: array (nullable = true)\n |    |-- element: string (containsNull = true)\n |-- initialSampleSize: string (nullable = true)\n |-- nCases: long (nullable = true)\n |-- nControls: long (nullable = true)\n |-- nSamples: long (nullable = true)\n |-- discoverySamples: array (nullable = true)\n |    |-- element: struct (containsNull = false)\n |    |    |-- sampleSize: string (nullable = true)\n |    |    |-- ancestry: string (nullable = true)\n |-- replicationSamples: array (nullable = true)\n |    |-- element: struct (containsNull = false)\n |    |    |-- sampleSize: string (nullable = true)\n |    |    |-- ancestry: string (nullable = true)\n |-- summarystatsLocation: string (nullable = true)\n |-- hasSumstats: boolean (nullable = true)\n
"},{"location":"components/dataset/study_index/study_index_gwas_catalog/","title":"Study index gwas catalog","text":"

Bases: StudyIndex

Study index dataset from GWAS Catalog.

The following information is harmonised from the GWAS Catalog:

  • All publication-related information is retained.
  • Mapped measured and background traits are parsed.
  • Studies are flagged if harmonised summary statistics datasets are available.
  • If available, the FTP path to these files is provided.
  • Ancestries from the discovery and replication stages are structured with sample counts.
  • Case/control counts are extracted.
  • The number of samples with European ancestry is extracted.

Source code in src/otg/dataset/study_index.py
@dataclass\nclass StudyIndexGWASCatalog(StudyIndex):\n\"\"\"Study index dataset from GWAS Catalog.\n\n    The following information is harmonised from the GWAS Catalog:\n\n    - All publication related information retained.\n    - Mapped measured and background traits parsed.\n    - Flagged if harmonized summary statistics datasets available.\n    - If available, the ftp path to these files presented.\n    - Ancestries from the discovery and replication stages are structured with sample counts.\n    - Case/control counts extracted.\n    - The number of samples with European ancestry extracted.\n\n    \"\"\"\n\n    @staticmethod\n    def _gwas_ancestry_to_gnomad(gwas_catalog_ancestry: Column) -> Column:\n\"\"\"Normalised ancestry column from GWAS Catalog into Gnomad ancestry.\n\n        Args:\n            gwas_catalog_ancestry (Column): GWAS Catalog ancestry\n\n        Returns:\n            Column: mapped Gnomad ancestry using LUT\n        \"\"\"\n        # GWAS Catalog to p-value mapping\n        json_dict = json.loads(\n            pkg_resources.read_text(\n                data, \"gwascat_2_gnomad_superpopulation_map.json\", encoding=\"utf-8\"\n            )\n        )\n        map_expr = f.create_map(*[f.lit(x) for x in chain(*json_dict.items())])\n\n        return f.transform(gwas_catalog_ancestry, lambda x: map_expr[x])\n\n    @classmethod\n    def _parse_study_table(\n        cls: type[StudyIndexGWASCatalog], catalog_studies: DataFrame\n    ) -> StudyIndexGWASCatalog:\n\"\"\"Harmonise GWASCatalog study table with `StudyIndex` schema.\n\n        Args:\n            catalog_studies (DataFrame): GWAS Catalog study table\n\n        Returns:\n            StudyIndexGWASCatalog:\n        \"\"\"\n        return cls(\n            _df=catalog_studies.select(\n                f.coalesce(\n                    f.col(\"STUDY ACCESSION\"), f.monotonically_increasing_id()\n                ).alias(\"studyId\"),\n                f.lit(\"GCST\").alias(\"projectId\"),\n                f.lit(\"gwas\").alias(\"studyType\"),\n                f.col(\"PUBMED ID\").alias(\"pubmedId\"),\n                f.col(\"FIRST AUTHOR\").alias(\"publicationFirstAuthor\"),\n                f.col(\"DATE\").alias(\"publicationDate\"),\n                f.col(\"JOURNAL\").alias(\"publicationJournal\"),\n                f.col(\"STUDY\").alias(\"publicationTitle\"),\n                f.coalesce(f.col(\"DISEASE/TRAIT\"), f.lit(\"Unreported\")).alias(\n                    \"traitFromSource\"\n                ),\n                f.col(\"INITIAL SAMPLE SIZE\").alias(\"initialSampleSize\"),\n                parse_efos(f.col(\"MAPPED_TRAIT_URI\")).alias(\"traitFromSourceMappedIds\"),\n                parse_efos(f.col(\"MAPPED BACKGROUND TRAIT URI\")).alias(\n                    \"backgroundTraitFromSourceMappedIds\"\n                ),\n            )\n        )\n\n    @classmethod\n    def from_source(\n        cls: type[StudyIndexGWASCatalog],\n        catalog_studies: DataFrame,\n        ancestry_file: DataFrame,\n        sumstats_lut: DataFrame,\n    ) -> StudyIndexGWASCatalog:\n\"\"\"This function ingests study level metadata from the GWAS Catalog.\n\n        Args:\n            catalog_studies (DataFrame): GWAS Catalog raw study table\n            ancestry_file (DataFrame): GWAS Catalog ancestry table.\n            sumstats_lut (DataFrame): GWAS Catalog summary statistics list.\n\n        Returns:\n            StudyIndexGWASCatalog: Parsed and annotated GWAS Catalog study table.\n        \"\"\"\n        # Read GWAS Catalogue raw 
data\n        return (\n            cls._parse_study_table(catalog_studies)\n            ._annotate_ancestries(ancestry_file)\n            ._annotate_sumstats_info(sumstats_lut)\n            ._annotate_discovery_sample_sizes()\n        )\n\n    def get_gnomad_ancestry_sample_sizes(self: StudyIndexGWASCatalog) -> DataFrame:\n\"\"\"Get all studies and their ancestries.\n\n        Returns:\n            DataFrame: containing `studyId`, `gnomadPopulation` and `relativeSampleSize` columns\n        \"\"\"\n        # Study ancestries\n        w_study = Window.partitionBy(\"studyId\")\n        return (\n            self.df\n            # Excluding studies where no sample discription is provided:\n            .filter(f.col(\"discoverySamples\").isNotNull())\n            # Exploding sample description and study identifier:\n            .withColumn(\"discoverySample\", f.explode(f.col(\"discoverySamples\")))\n            # Splitting sample descriptions further:\n            .withColumn(\n                \"ancestries\",\n                f.split(f.col(\"discoverySample.ancestry\"), r\",\\s(?![^()]*\\))\"),\n            )\n            # Dividing sample sizes assuming even distribution\n            .withColumn(\n                \"adjustedSampleSize\",\n                f.col(\"discoverySample.sampleSize\") / f.size(f.col(\"ancestries\")),\n            )\n            # mapped to gnomAD superpopulation and exploded\n            .withColumn(\n                \"gnomadPopulation\",\n                f.explode(\n                    StudyIndexGWASCatalog._gwas_ancestry_to_gnomad(f.col(\"ancestries\"))\n                ),\n            )\n            # Group by studies and aggregate for major population:\n            .groupBy(\"studyId\", \"gnomadPopulation\")\n            .agg(f.sum(f.col(\"adjustedSampleSize\")).alias(\"sampleSize\"))\n            # Calculate proportions for each study\n            .withColumn(\n                \"relativeSampleSize\",\n                f.col(\"sampleSize\") / f.sum(\"sampleSize\").over(w_study),\n            )\n            .drop(\"sampleSize\")\n        )\n\n    def update_study_id(\n        self: StudyIndexGWASCatalog, study_annotation: DataFrame\n    ) -> StudyIndexGWASCatalog:\n\"\"\"Update studyId with a dataframe containing study.\n\n        Args:\n            study_annotation (DataFrame): Dataframe containing `updatedStudyId`, `traitFromSource`, `traitFromSourceMappedIds` and key column `studyId`.\n\n        Returns:\n            StudyIndexGWASCatalog: Updated study table.\n        \"\"\"\n        self.df = (\n            self._df.join(\n                study_annotation.select(\n                    *[\n                        f.col(c).alias(f\"updated{c}\")\n                        if c not in [\"studyId\", \"updatedStudyId\"]\n                        else f.col(c)\n                        for c in study_annotation.columns\n                    ]\n                ),\n                on=\"studyId\",\n                how=\"left\",\n            )\n            .withColumn(\n                \"studyId\",\n                f.coalesce(f.col(\"updatedStudyId\"), f.col(\"studyId\")),\n            )\n            .withColumn(\n                \"traitFromSource\",\n                f.coalesce(f.col(\"updatedtraitFromSource\"), f.col(\"traitFromSource\")),\n            )\n            .withColumn(\n                \"traitFromSourceMappedIds\",\n                f.coalesce(\n                    f.col(\"updatedtraitFromSourceMappedIds\"),\n                    
f.col(\"traitFromSourceMappedIds\"),\n                ),\n            )\n            .select(self._df.columns)\n        )\n\n        return self\n\n    def _annotate_ancestries(\n        self: StudyIndexGWASCatalog, ancestry_lut: DataFrame\n    ) -> StudyIndexGWASCatalog:\n\"\"\"Extracting sample sizes and ancestry information.\n\n        This function parses the ancestry data. Also get counts for the europeans in the same\n        discovery stage.\n\n        Args:\n            ancestry_lut (DataFrame): Ancestry table as downloaded from the GWAS Catalog\n\n        Returns:\n            StudyIndexGWASCatalog: Slimmed and cleaned version of the ancestry annotation.\n        \"\"\"\n        ancestry = (\n            ancestry_lut\n            # Convert column headers to camelcase:\n            .transform(\n                lambda df: df.select(\n                    *[f.expr(column2camel_case(x)) for x in df.columns]\n                )\n            ).withColumnRenamed(\n                \"studyAccession\", \"studyId\"\n            )  # studyId has not been split yet\n        )\n\n        # Get a high resolution dataset on experimental stage:\n        ancestry_stages = (\n            ancestry.groupBy(\"studyId\")\n            .pivot(\"stage\")\n            .agg(\n                f.collect_set(\n                    f.struct(\n                        f.col(\"numberOfIndividuals\").alias(\"sampleSize\"),\n                        f.col(\"broadAncestralCategory\").alias(\"ancestry\"),\n                    )\n                )\n            )\n            .withColumnRenamed(\"initial\", \"discoverySamples\")\n            .withColumnRenamed(\"replication\", \"replicationSamples\")\n            .persist()\n        )\n\n        # Generate information on the ancestry composition of the discovery stage, and calculate\n        # the proportion of the Europeans:\n        europeans_deconvoluted = (\n            ancestry\n            # Focus on discovery stage:\n            .filter(f.col(\"stage\") == \"initial\")\n            # Sorting ancestries if European:\n            .withColumn(\n                \"ancestryFlag\",\n                # Excluding finnish:\n                f.when(\n                    f.col(\"initialSampleDescription\").contains(\"Finnish\"),\n                    f.lit(\"other\"),\n                )\n                # Excluding Icelandic population:\n                .when(\n                    f.col(\"initialSampleDescription\").contains(\"Icelandic\"),\n                    f.lit(\"other\"),\n                )\n                # Including European ancestry:\n                .when(f.col(\"broadAncestralCategory\") == \"European\", f.lit(\"european\"))\n                # Exclude all other population:\n                .otherwise(\"other\"),\n            )\n            # Grouping by study accession and initial sample description:\n            .groupBy(\"studyId\")\n            .pivot(\"ancestryFlag\")\n            .agg(\n                # Summarizing sample sizes for all ancestries:\n                f.sum(f.col(\"numberOfIndividuals\"))\n            )\n            # Do arithmetics to make sure we have the right proportion of european in the set:\n            .withColumn(\n                \"initialSampleCountEuropean\",\n                f.when(f.col(\"european\").isNull(), f.lit(0)).otherwise(\n                    f.col(\"european\")\n                ),\n            )\n            .withColumn(\n                \"initialSampleCountOther\",\n                f.when(f.col(\"other\").isNull(), 
f.lit(0)).otherwise(f.col(\"other\")),\n            )\n            .withColumn(\n                \"initialSampleCount\",\n                f.col(\"initialSampleCountEuropean\") + f.col(\"other\"),\n            )\n            .drop(\n                \"european\",\n                \"other\",\n                \"initialSampleCount\",\n                \"initialSampleCountEuropean\",\n                \"initialSampleCountOther\",\n            )\n        )\n\n        parsed_ancestry_lut = ancestry_stages.join(\n            europeans_deconvoluted, on=\"studyId\", how=\"outer\"\n        )\n\n        self.df = self.df.join(parsed_ancestry_lut, on=\"studyId\", how=\"left\")\n        return self\n\n    def _annotate_sumstats_info(\n        self: StudyIndexGWASCatalog, sumstats_lut: DataFrame\n    ) -> StudyIndexGWASCatalog:\n\"\"\"Annotate summary stat locations.\n\n        Args:\n            sumstats_lut (DataFrame): listing GWAS Catalog summary stats paths\n\n        Returns:\n            StudyIndexGWASCatalog: including `summarystatsLocation` and `hasSumstats` columns\n        \"\"\"\n        gwas_sumstats_base_uri = (\n            \"ftp://ftp.ebi.ac.uk/pub/databases/gwas/summary_statistics/\"\n        )\n\n        parsed_sumstats_lut = sumstats_lut.withColumn(\n            \"summarystatsLocation\",\n            f.concat(\n                f.lit(gwas_sumstats_base_uri),\n                f.regexp_replace(f.col(\"_c0\"), r\"^\\.\\/\", \"\"),\n            ),\n        ).select(\n            f.regexp_extract(f.col(\"summarystatsLocation\"), r\"\\/(GCST\\d+)\\/\", 1).alias(\n                \"studyId\"\n            ),\n            \"summarystatsLocation\",\n            f.lit(True).alias(\"hasSumstats\"),\n        )\n\n        self.df = (\n            self.df.drop(\"hasSumstats\")\n            .join(parsed_sumstats_lut, on=\"studyId\", how=\"left\")\n            .withColumn(\"hasSumstats\", f.coalesce(f.col(\"hasSumstats\"), f.lit(False)))\n        )\n        return self\n\n    def _annotate_discovery_sample_sizes(\n        self: StudyIndexGWASCatalog,\n    ) -> StudyIndexGWASCatalog:\n\"\"\"Extract the sample size of the discovery stage of the study as annotated in the GWAS Catalog.\n\n        For some studies that measure quantitative traits, nCases and nControls can't be extracted. 
Therefore, we assume these are 0.\n\n        Returns:\n            StudyIndexGWASCatalog: object with columns `nCases`, `nControls`, and `nSamples` per `studyId` correctly extracted.\n        \"\"\"\n        sample_size_lut = (\n            self.df.select(\n                \"studyId\",\n                f.explode_outer(f.split(f.col(\"initialSampleSize\"), r\",\\s+\")).alias(\n                    \"samples\"\n                ),\n            )\n            # Extracting the sample size from the string:\n            .withColumn(\n                \"sampleSize\",\n                f.regexp_extract(\n                    f.regexp_replace(f.col(\"samples\"), \",\", \"\"), r\"[0-9,]+\", 0\n                ).cast(t.IntegerType()),\n            )\n            .select(\n                \"studyId\",\n                \"sampleSize\",\n                f.when(f.col(\"samples\").contains(\"cases\"), f.col(\"sampleSize\"))\n                .otherwise(f.lit(0))\n                .alias(\"nCases\"),\n                f.when(f.col(\"samples\").contains(\"controls\"), f.col(\"sampleSize\"))\n                .otherwise(f.lit(0))\n                .alias(\"nControls\"),\n            )\n            # Aggregating sample sizes for all ancestries:\n            .groupBy(\"studyId\")  # studyId has not been split yet\n            .agg(\n                f.sum(\"nCases\").alias(\"nCases\"),\n                f.sum(\"nControls\").alias(\"nControls\"),\n                f.sum(\"sampleSize\").alias(\"nSamples\"),\n            )\n        )\n        self.df = self.df.join(sample_size_lut, on=\"studyId\", how=\"left\")\n        return self\n
"},{"location":"components/dataset/study_index/study_index_gwas_catalog/#otg.dataset.study_index.StudyIndexGWASCatalog.from_source","title":"from_source(catalog_studies, ancestry_file, sumstats_lut) classmethod","text":"

This function ingests study level metadata from the GWAS Catalog.

Parameters:

catalog_studies (DataFrame): GWAS Catalog raw study table. Required.

ancestry_file (DataFrame): GWAS Catalog ancestry table. Required.

sumstats_lut (DataFrame): GWAS Catalog summary statistics list. Required.

Returns:

StudyIndexGWASCatalog: Parsed and annotated GWAS Catalog study table.

Source code in src/otg/dataset/study_index.py
@classmethod\ndef from_source(\n    cls: type[StudyIndexGWASCatalog],\n    catalog_studies: DataFrame,\n    ancestry_file: DataFrame,\n    sumstats_lut: DataFrame,\n) -> StudyIndexGWASCatalog:\n\"\"\"This function ingests study level metadata from the GWAS Catalog.\n\n    Args:\n        catalog_studies (DataFrame): GWAS Catalog raw study table\n        ancestry_file (DataFrame): GWAS Catalog ancestry table.\n        sumstats_lut (DataFrame): GWAS Catalog summary statistics list.\n\n    Returns:\n        StudyIndexGWASCatalog: Parsed and annotated GWAS Catalog study table.\n    \"\"\"\n    # Read GWAS Catalogue raw data\n    return (\n        cls._parse_study_table(catalog_studies)\n        ._annotate_ancestries(ancestry_file)\n        ._annotate_sumstats_info(sumstats_lut)\n        ._annotate_discovery_sample_sizes()\n    )\n
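A minimal usage sketch: the file paths and read options are assumptions about how the raw GWAS Catalog tables are loaded, not the pipeline's actual configuration.

```python
from otg.dataset.study_index import StudyIndexGWASCatalog

# Raw inputs (placeholder paths; tab-separated/header-less read options are assumptions).
catalog_studies = spark.read.csv("gwas_catalog_studies.tsv", sep="\t", header=True)
ancestry_file = spark.read.csv("gwas_catalog_ancestries.tsv", sep="\t", header=True)
sumstats_lut = spark.read.csv("harmonised_sumstats_list.txt", header=False)

gwas_catalog_studies = StudyIndexGWASCatalog.from_source(
    catalog_studies=catalog_studies,
    ancestry_file=ancestry_file,
    sumstats_lut=sumstats_lut,
)
gwas_catalog_studies.df.select("studyId", "nSamples", "hasSumstats").show(5)
```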
"},{"location":"components/dataset/study_index/study_index_gwas_catalog/#otg.dataset.study_index.StudyIndexGWASCatalog.get_gnomad_ancestry_sample_sizes","title":"get_gnomad_ancestry_sample_sizes()","text":"

Get all studies and their ancestries.

Returns:

DataFrame: Dataframe containing studyId, gnomadPopulation and relativeSampleSize columns.

Source code in src/otg/dataset/study_index.py
def get_gnomad_ancestry_sample_sizes(self: StudyIndexGWASCatalog) -> DataFrame:\n\"\"\"Get all studies and their ancestries.\n\n    Returns:\n        DataFrame: containing `studyId`, `gnomadPopulation` and `relativeSampleSize` columns\n    \"\"\"\n    # Study ancestries\n    w_study = Window.partitionBy(\"studyId\")\n    return (\n        self.df\n        # Excluding studies where no sample discription is provided:\n        .filter(f.col(\"discoverySamples\").isNotNull())\n        # Exploding sample description and study identifier:\n        .withColumn(\"discoverySample\", f.explode(f.col(\"discoverySamples\")))\n        # Splitting sample descriptions further:\n        .withColumn(\n            \"ancestries\",\n            f.split(f.col(\"discoverySample.ancestry\"), r\",\\s(?![^()]*\\))\"),\n        )\n        # Dividing sample sizes assuming even distribution\n        .withColumn(\n            \"adjustedSampleSize\",\n            f.col(\"discoverySample.sampleSize\") / f.size(f.col(\"ancestries\")),\n        )\n        # mapped to gnomAD superpopulation and exploded\n        .withColumn(\n            \"gnomadPopulation\",\n            f.explode(\n                StudyIndexGWASCatalog._gwas_ancestry_to_gnomad(f.col(\"ancestries\"))\n            ),\n        )\n        # Group by studies and aggregate for major population:\n        .groupBy(\"studyId\", \"gnomadPopulation\")\n        .agg(f.sum(f.col(\"adjustedSampleSize\")).alias(\"sampleSize\"))\n        # Calculate proportions for each study\n        .withColumn(\n            \"relativeSampleSize\",\n            f.col(\"sampleSize\") / f.sum(\"sampleSize\").over(w_study),\n        )\n        .drop(\"sampleSize\")\n    )\n
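A plain-Python sketch of the even-split and proportion arithmetic used above, with made-up numbers; the actual gnomAD population labels come from the bundled lookup table, which is not shown here.

```python
# One discovery sample entry with two ancestry labels (made-up numbers).
discovery_sample = {"sampleSize": 1000, "ancestry": "European, East Asian"}

# The sample size is divided evenly across the listed ancestries.
ancestries = discovery_sample["ancestry"].split(", ")
adjusted = discovery_sample["sampleSize"] / len(ancestries)  # 500.0 each

# relativeSampleSize is each population's share of the study total.
total = adjusted * len(ancestries)
print({a: adjusted / total for a in ancestries})  # {'European': 0.5, 'East Asian': 0.5}
```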
"},{"location":"components/dataset/study_index/study_index_gwas_catalog/#otg.dataset.study_index.StudyIndexGWASCatalog.update_study_id","title":"update_study_id(study_annotation)","text":"

Update studyId using a dataframe of study annotations.

Parameters:

study_annotation (DataFrame): Dataframe containing updatedStudyId, traitFromSource, traitFromSourceMappedIds and key column studyId. Required.

Returns:

StudyIndexGWASCatalog: Updated study table.

Source code in src/otg/dataset/study_index.py
def update_study_id(\n    self: StudyIndexGWASCatalog, study_annotation: DataFrame\n) -> StudyIndexGWASCatalog:\n\"\"\"Update studyId with a dataframe containing study.\n\n    Args:\n        study_annotation (DataFrame): Dataframe containing `updatedStudyId`, `traitFromSource`, `traitFromSourceMappedIds` and key column `studyId`.\n\n    Returns:\n        StudyIndexGWASCatalog: Updated study table.\n    \"\"\"\n    self.df = (\n        self._df.join(\n            study_annotation.select(\n                *[\n                    f.col(c).alias(f\"updated{c}\")\n                    if c not in [\"studyId\", \"updatedStudyId\"]\n                    else f.col(c)\n                    for c in study_annotation.columns\n                ]\n            ),\n            on=\"studyId\",\n            how=\"left\",\n        )\n        .withColumn(\n            \"studyId\",\n            f.coalesce(f.col(\"updatedStudyId\"), f.col(\"studyId\")),\n        )\n        .withColumn(\n            \"traitFromSource\",\n            f.coalesce(f.col(\"updatedtraitFromSource\"), f.col(\"traitFromSource\")),\n        )\n        .withColumn(\n            \"traitFromSourceMappedIds\",\n            f.coalesce(\n                f.col(\"updatedtraitFromSourceMappedIds\"),\n                f.col(\"traitFromSourceMappedIds\"),\n            ),\n        )\n        .select(self._df.columns)\n    )\n\n    return self\n
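A small sketch of the annotation table this method expects; the accession and trait values are made up for illustration, and gwas_catalog_studies is assumed to be a StudyIndexGWASCatalog instance (for example, one built with from_source above).

```python
# Hypothetical annotation table; the values are made up for illustration.
study_annotation = spark.createDataFrame(
    [("GCST0000001", "GCST0000001_1", "Curated trait name", ["EFO_0000000"])],
    ["studyId", "updatedStudyId", "traitFromSource", "traitFromSourceMappedIds"],
)

# Rows whose studyId matches get their identifier and trait fields overwritten;
# unmatched rows keep their original values (left join + coalesce).
gwas_catalog_studies = gwas_catalog_studies.update_study_id(study_annotation)
```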
"},{"location":"components/dataset/study_index/study_index_gwas_catalog/#schema","title":"Schema","text":"
root\n |-- studyId: string (nullable = false)\n |-- projectId: string (nullable = false)\n |-- studyType: string (nullable = false)\n |-- traitFromSource: string (nullable = false)\n |-- traitFromSourceMappedIds: array (nullable = true)\n |    |-- element: string (containsNull = true)\n |-- pubmedId: string (nullable = true)\n |-- publicationTitle: string (nullable = true)\n |-- publicationFirstAuthor: string (nullable = true)\n |-- publicationDate: string (nullable = true)\n |-- publicationJournal: string (nullable = true)\n |-- backgroundTraitFromSourceMappedIds: array (nullable = true)\n |    |-- element: string (containsNull = true)\n |-- initialSampleSize: string (nullable = true)\n |-- nCases: long (nullable = true)\n |-- nControls: long (nullable = true)\n |-- nSamples: long (nullable = true)\n |-- discoverySamples: array (nullable = true)\n |    |-- element: struct (containsNull = false)\n |    |    |-- sampleSize: string (nullable = true)\n |    |    |-- ancestry: string (nullable = true)\n |-- replicationSamples: array (nullable = true)\n |    |-- element: struct (containsNull = false)\n |    |    |-- sampleSize: string (nullable = true)\n |    |    |-- ancestry: string (nullable = true)\n |-- summarystatsLocation: string (nullable = true)\n |-- hasSumstats: boolean (nullable = true)\n
"},{"location":"components/dataset/study_locus/_study_locus/","title":"Study-locus","text":"

Bases: Dataset

Study-Locus dataset.

This dataset captures associations between studies/traits and genetic loci, as provided by fine-mapping methods.

Source code in src/otg/dataset/study_locus.py
@dataclass\nclass StudyLocus(Dataset):\n\"\"\"Study-Locus dataset.\n\n    This dataset captures associations between study/traits and a genetic loci as provided by finemapping methods.\n    \"\"\"\n\n    _schema: StructType = parse_spark_schema(\"study_locus.json\")\n\n    @staticmethod\n    def _overlapping_peaks(credset_to_overlap: DataFrame) -> DataFrame:\n\"\"\"Calculate overlapping signals (study-locus) between GWAS-GWAS and GWAS-Molecular trait.\n\n        Args:\n            credset_to_overlap (DataFrame): DataFrame containing at least `studyLocusId`, `studyType`, `chromosome` and `tagVariantId` columns.\n\n        Returns:\n            DataFrame: containing `left_studyLocusId`, `right_studyLocusId` and `chromosome` columns.\n        \"\"\"\n        # Reduce columns to the minimum to reduce the size of the dataframe\n        credset_to_overlap = credset_to_overlap.select(\n            \"studyLocusId\", \"studyType\", \"chromosome\", \"tagVariantId\"\n        )\n        return (\n            credset_to_overlap.alias(\"left\")\n            .filter(f.col(\"studyType\") == \"gwas\")\n            # Self join with complex condition. Left it's all gwas and right can be gwas or molecular trait\n            .join(\n                credset_to_overlap.alias(\"right\"),\n                on=[\n                    f.col(\"left.chromosome\") == f.col(\"right.chromosome\"),\n                    f.col(\"left.tagVariantId\") == f.col(\"right.tagVariantId\"),\n                    (f.col(\"right.studyType\") != \"gwas\")\n                    | (f.col(\"left.studyLocusId\") > f.col(\"right.studyLocusId\")),\n                ],\n                how=\"inner\",\n            )\n            .select(\n                f.col(\"left.studyLocusId\").alias(\"left_studyLocusId\"),\n                f.col(\"right.studyLocusId\").alias(\"right_studyLocusId\"),\n                f.col(\"left.chromosome\").alias(\"chromosome\"),\n            )\n            .distinct()\n            .repartition(\"chromosome\")\n            .persist()\n        )\n\n    @staticmethod\n    def _align_overlapping_tags(\n        credset_to_overlap: DataFrame, peak_overlaps: DataFrame\n    ) -> StudyLocusOverlap:\n\"\"\"Align overlapping tags in pairs of overlapping study-locus, keeping all tags in both loci.\n\n        Args:\n            credset_to_overlap (DataFrame): containing `studyLocusId`, `studyType`, `chromosome`, `tagVariantId`, `logABF` and `posteriorProbability` columns.\n            peak_overlaps (DataFrame): containing `left_studyLocusId`, `right_studyLocusId` and `chromosome` columns.\n\n        Returns:\n            StudyLocusOverlap: Pairs of overlapping study-locus with aligned tags.\n        \"\"\"\n        # Complete information about all tags in the left study-locus of the overlap\n        overlapping_left = credset_to_overlap.select(\n            f.col(\"chromosome\"),\n            f.col(\"tagVariantId\"),\n            f.col(\"studyLocusId\").alias(\"left_studyLocusId\"),\n            f.col(\"logABF\").alias(\"left_logABF\"),\n            f.col(\"posteriorProbability\").alias(\"left_posteriorProbability\"),\n        ).join(peak_overlaps, on=[\"chromosome\", \"left_studyLocusId\"], how=\"inner\")\n\n        # Complete information about all tags in the right study-locus of the overlap\n        overlapping_right = credset_to_overlap.select(\n            f.col(\"chromosome\"),\n            f.col(\"tagVariantId\"),\n            f.col(\"studyLocusId\").alias(\"right_studyLocusId\"),\n            
f.col(\"logABF\").alias(\"right_logABF\"),\n            f.col(\"posteriorProbability\").alias(\"right_posteriorProbability\"),\n        ).join(peak_overlaps, on=[\"chromosome\", \"right_studyLocusId\"], how=\"inner\")\n\n        # Include information about all tag variants in both study-locus aligned by tag variant id\n        return StudyLocusOverlap(\n            _df=overlapping_left.join(\n                overlapping_right,\n                on=[\n                    \"chromosome\",\n                    \"right_studyLocusId\",\n                    \"left_studyLocusId\",\n                    \"tagVariantId\",\n                ],\n                how=\"outer\",\n            )\n            # ensures nullable=false for following columns\n            .fillna(\n                value=\"unknown\",\n                subset=[\n                    \"chromosome\",\n                    \"right_studyLocusId\",\n                    \"left_studyLocusId\",\n                    \"tagVariantId\",\n                ],\n            )\n        )\n\n    @staticmethod\n    def _update_quality_flag(\n        qc: Column, flag_condition: Column, flag_text: StudyLocusQualityCheck\n    ) -> Column:\n\"\"\"Update the provided quality control list with a new flag if condition is met.\n\n        Args:\n            qc (Column): Array column with the current list of qc flags.\n            flag_condition (Column): This is a column of booleans, signing which row should be flagged\n            flag_text (StudyLocusQualityCheck): Text for the new quality control flag\n\n        Returns:\n            Column: Array column with the updated list of qc flags.\n        \"\"\"\n        qc = f.when(qc.isNull(), f.array()).otherwise(qc)\n        return f.when(\n            flag_condition,\n            f.array_union(qc, f.array(f.lit(flag_text.value))),\n        ).otherwise(qc)\n\n    @classmethod\n    def from_parquet(cls: type[StudyLocus], session: Session, path: str) -> StudyLocus:\n\"\"\"Initialise StudyLocus from parquet file.\n\n        Args:\n            session (Session): spark session\n            path (str): Path to parquet file\n\n        Returns:\n            StudyLocus: Study-locus dataset\n        \"\"\"\n        df = session.read_parquet(path=path, schema=cls._schema)\n        return cls(_df=df, _schema=cls._schema)\n\n    def credible_set(\n        self: StudyLocus,\n        credible_interval: CredibleInterval,\n    ) -> StudyLocus:\n\"\"\"Filter study-locus tag variants based on given credible interval.\n\n        Args:\n            credible_interval (CredibleInterval): Credible interval to filter for.\n\n        Returns:\n            StudyLocus: Filtered study-locus dataset.\n        \"\"\"\n        self.df = self._df.withColumn(\n            \"credibleSet\",\n            f.expr(f\"filter(credibleSet, tag -> (tag.{credible_interval.value}))\"),\n        )\n        return self\n\n    def overlaps(self: StudyLocus, study_index: StudyIndex) -> StudyLocusOverlap:\n\"\"\"Calculate overlapping study-locus.\n\n        Find overlapping study-locus that share at least one tagging variant. 
All GWAS-GWAS and all GWAS-Molecular traits are computed with the Molecular traits always\n        appearing on the right side.\n\n        Args:\n            study_index (StudyIndex): Study index to resolve study types.\n\n        Returns:\n            StudyLocusOverlap: Pairs of overlapping study-locus with aligned tags.\n        \"\"\"\n        credset_to_overlap = (\n            self.df.join(study_index.study_type_lut(), on=\"studyId\", how=\"inner\")\n            .withColumn(\"credibleSet\", f.explode(\"credibleSet\"))\n            .select(\n                \"studyLocusId\",\n                \"studyType\",\n                \"chromosome\",\n                f.col(\"credibleSet.tagVariantId\").alias(\"tagVariantId\"),\n                f.col(\"credibleSet.logABF\").alias(\"logABF\"),\n                f.col(\"credibleSet.posteriorProbability\").alias(\"posteriorProbability\"),\n            )\n            .persist()\n        )\n\n        # overlapping study-locus\n        peak_overlaps = self._overlapping_peaks(credset_to_overlap)\n\n        # study-locus overlap by aligning overlapping variants\n        return self._align_overlapping_tags(credset_to_overlap, peak_overlaps)\n\n    def unique_lead_tag_variants(self: StudyLocus) -> DataFrame:\n\"\"\"All unique lead and tag variants contained in the `StudyLocus` dataframe.\n\n        Returns:\n            DataFrame: A dataframe containing `variantId` and `chromosome` columns.\n        \"\"\"\n        lead_tags = (\n            self.df.select(\n                f.col(\"variantId\"),\n                f.col(\"chromosome\"),\n                f.explode(\"credibleSet.tagVariantId\").alias(\"tagVariantId\"),\n            )\n            .repartition(\"chromosome\")\n            .persist()\n        )\n        return (\n            lead_tags.select(\"variantId\", \"chromosome\")\n            .union(\n                lead_tags.select(f.col(\"tagVariantId\").alias(\"variantId\"), \"chromosome\")\n            )\n            .distinct()\n        )\n\n    def unique_study_locus_ancestries(\n        self: StudyLocus, studies: StudyIndexGWASCatalog\n    ) -> DataFrame:\n\"\"\"All unique lead variant and ancestries contained in the `StudyLocus`.\n\n        Args:\n            studies (StudyIndexGWASCatalog): Metadata about studies in the `StudyLocus`.\n\n        Returns:\n            DataFrame: unique [\"variantId\", \"studyId\", \"gnomadPopulation\", \"chromosome\", \"relativeSampleSize\"]\n\n        Note:\n            This method is only available for GWAS Catalog studies.\n        \"\"\"\n        return (\n            self.df.join(\n                studies.get_gnomad_ancestry_sample_sizes(), on=\"studyId\", how=\"left\"\n            )\n            .filter(f.col(\"position\").isNotNull())\n            .select(\n                \"variantId\",\n                \"chromosome\",\n                \"studyId\",\n                \"gnomadPopulation\",\n                \"relativeSampleSize\",\n            )\n            .distinct()\n        )\n\n    def neglog_pvalue(self: StudyLocus) -> Column:\n\"\"\"Returns the negative log p-value.\n\n        Returns:\n            Column: Negative log p-value\n        \"\"\"\n        return calculate_neglog_pvalue(\n            self.df.pValueMantissa,\n            self.df.pValueExponent,\n        )\n\n    def annotate_credible_sets(self: StudyLocus) -> StudyLocus:\n\"\"\"Annotate study-locus dataset with credible set flags.\n\n        Sorts the array in the `credibleSet` column elements by their `posteriorProbability` values in 
descending order and adds\n        `is95CredibleSet` and `is99CredibleSet` fields to the elements, indicating which are the tagging variants whose cumulative sum\n        of their `posteriorProbability` values is below 0.95 and 0.99, respectively.\n\n        Returns:\n            StudyLocus: including annotation on `is95CredibleSet` and `is99CredibleSet`.\n        \"\"\"\n        self.df = self.df.withColumn(\n            # Sort credible set by posterior probability in descending order\n            \"credibleSet\",\n            f.when(\n                f.size(f.col(\"credibleSet\")) > 0,\n                order_array_of_structs_by_field(\"credibleSet\", \"posteriorProbability\"),\n            ).when(f.size(f.col(\"credibleSet\")) == 0, f.col(\"credibleSet\")),\n        ).withColumn(\n            # Calculate array of cumulative sums of posterior probabilities to determine which variants are in the 95% and 99% credible sets\n            # and zip the cumulative sums array with the credible set array to add the flags\n            \"credibleSet\",\n            f.when(\n                f.size(f.col(\"credibleSet\")) > 0,\n                f.zip_with(\n                    f.col(\"credibleSet\"),\n                    f.transform(\n                        f.sequence(f.lit(1), f.size(f.col(\"credibleSet\"))),\n                        lambda index: f.aggregate(\n                            f.slice(\n                                # By using `index - 1` we introduce a value of `0.0` in the cumulative sums array. to ensure that the last variant\n                                # that exceeds the 0.95 threshold is included in the cumulative sum, as its probability is necessary to satisfy the threshold.\n                                f.col(\"credibleSet.posteriorProbability\"),\n                                1,\n                                index - 1,\n                            ),\n                            f.lit(0.0),\n                            lambda acc, el: acc + el,\n                        ),\n                    ),\n                    lambda struct_e, acc: struct_e.withField(\n                        CredibleInterval.IS95.value, acc < 0.95\n                    ).withField(CredibleInterval.IS99.value, acc < 0.99),\n                ),\n            ).when(f.size(f.col(\"credibleSet\")) == 0, f.col(\"credibleSet\")),\n        )\n        return self\n\n    def clump(self: StudyLocus) -> StudyLocus:\n\"\"\"Perform LD clumping of the studyLocus.\n\n        Evaluates whether a lead variant is linked to a tag (with lowest p-value) in the same studyLocus dataset.\n\n        Returns:\n            StudyLocus: with empty credible sets for linked variants and QC flag.\n        \"\"\"\n        self.df = (\n            self.df.withColumn(\n                \"is_lead_linked\",\n                LDclumping._is_lead_linked(\n                    self.df.studyId,\n                    self.df.variantId,\n                    self.df.pValueExponent,\n                    self.df.pValueMantissa,\n                    self.df.credibleSet,\n                ),\n            )\n            .withColumn(\n                \"credibleSet\",\n                f.when(f.col(\"is_lead_linked\"), f.array()).otherwise(\n                    f.col(\"credibleSet\")\n                ),\n            )\n            .withColumn(\n                \"qualityControls\",\n                StudyLocus._update_quality_flag(\n                    f.col(\"qualityControls\"),\n                    f.col(\"is_lead_linked\"),\n                    
StudyLocusQualityCheck.LD_CLUMPED,\n                ),\n            )\n            .drop(\"is_lead_linked\")\n        )\n        return self\n
"},{"location":"components/dataset/study_locus/_study_locus/#otg.dataset.study_locus.StudyLocus.annotate_credible_sets","title":"annotate_credible_sets()","text":"

Annotate study-locus dataset with credible set flags.

Sorts the elements of the credibleSet array by their posteriorProbability values in descending order, and adds is95CredibleSet and is99CredibleSet fields to each element, indicating which tagging variants fall within the 95% and 99% credible sets (i.e. those for which the cumulative sum of the preceding posteriorProbability values is still below 0.95 and 0.99, respectively).

Returns:

StudyLocus: including annotation on is95CredibleSet and is99CredibleSet.

Source code in src/otg/dataset/study_locus.py
def annotate_credible_sets(self: StudyLocus) -> StudyLocus:\n\"\"\"Annotate study-locus dataset with credible set flags.\n\n    Sorts the array in the `credibleSet` column elements by their `posteriorProbability` values in descending order and adds\n    `is95CredibleSet` and `is99CredibleSet` fields to the elements, indicating which are the tagging variants whose cumulative sum\n    of their `posteriorProbability` values is below 0.95 and 0.99, respectively.\n\n    Returns:\n        StudyLocus: including annotation on `is95CredibleSet` and `is99CredibleSet`.\n    \"\"\"\n    self.df = self.df.withColumn(\n        # Sort credible set by posterior probability in descending order\n        \"credibleSet\",\n        f.when(\n            f.size(f.col(\"credibleSet\")) > 0,\n            order_array_of_structs_by_field(\"credibleSet\", \"posteriorProbability\"),\n        ).when(f.size(f.col(\"credibleSet\")) == 0, f.col(\"credibleSet\")),\n    ).withColumn(\n        # Calculate array of cumulative sums of posterior probabilities to determine which variants are in the 95% and 99% credible sets\n        # and zip the cumulative sums array with the credible set array to add the flags\n        \"credibleSet\",\n        f.when(\n            f.size(f.col(\"credibleSet\")) > 0,\n            f.zip_with(\n                f.col(\"credibleSet\"),\n                f.transform(\n                    f.sequence(f.lit(1), f.size(f.col(\"credibleSet\"))),\n                    lambda index: f.aggregate(\n                        f.slice(\n                            # By using `index - 1` we introduce a value of `0.0` in the cumulative sums array. to ensure that the last variant\n                            # that exceeds the 0.95 threshold is included in the cumulative sum, as its probability is necessary to satisfy the threshold.\n                            f.col(\"credibleSet.posteriorProbability\"),\n                            1,\n                            index - 1,\n                        ),\n                        f.lit(0.0),\n                        lambda acc, el: acc + el,\n                    ),\n                ),\n                lambda struct_e, acc: struct_e.withField(\n                    CredibleInterval.IS95.value, acc < 0.95\n                ).withField(CredibleInterval.IS99.value, acc < 0.99),\n            ),\n        ).when(f.size(f.col(\"credibleSet\")) == 0, f.col(\"credibleSet\")),\n    )\n    return self\n
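To make the cumulative-sum logic above concrete, here is a minimal plain-Python sketch (not Spark) of how the flags are assigned; the posterior probabilities are made-up values.

```python
# Posterior probabilities, already sorted in descending order (made-up values).
pps = [0.60, 0.25, 0.10, 0.05]

# For the i-th element the cumulative sum of the *preceding* elements is used
# (the `index - 1` slice in the Spark code), so the variant that pushes the
# total over a threshold is still included in that credible set.
for i, pp in enumerate(pps):
    cum_before = sum(pps[:i])
    print(pp, "is95CredibleSet:", cum_before < 0.95, "is99CredibleSet:", cum_before < 0.99)

# 0.60 -> is95: True,  is99: True   (cum_before = 0.00)
# 0.25 -> is95: True,  is99: True   (cum_before = 0.60)
# 0.10 -> is95: True,  is99: True   (cum_before = 0.85)
# 0.05 -> is95: False, is99: True   (cum_before = 0.95)
```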
"},{"location":"components/dataset/study_locus/_study_locus/#otg.dataset.study_locus.StudyLocus.clump","title":"clump()","text":"

Perform LD clumping of the studyLocus.

Evaluates whether a lead variant is linked to a tag (with lowest p-value) in the same studyLocus dataset.

Returns:

StudyLocus: with empty credible sets for linked variants and QC flag.

Source code in src/otg/dataset/study_locus.py
def clump(self: StudyLocus) -> StudyLocus:\n\"\"\"Perform LD clumping of the studyLocus.\n\n    Evaluates whether a lead variant is linked to a tag (with lowest p-value) in the same studyLocus dataset.\n\n    Returns:\n        StudyLocus: with empty credible sets for linked variants and QC flag.\n    \"\"\"\n    self.df = (\n        self.df.withColumn(\n            \"is_lead_linked\",\n            LDclumping._is_lead_linked(\n                self.df.studyId,\n                self.df.variantId,\n                self.df.pValueExponent,\n                self.df.pValueMantissa,\n                self.df.credibleSet,\n            ),\n        )\n        .withColumn(\n            \"credibleSet\",\n            f.when(f.col(\"is_lead_linked\"), f.array()).otherwise(\n                f.col(\"credibleSet\")\n            ),\n        )\n        .withColumn(\n            \"qualityControls\",\n            StudyLocus._update_quality_flag(\n                f.col(\"qualityControls\"),\n                f.col(\"is_lead_linked\"),\n                StudyLocusQualityCheck.LD_CLUMPED,\n            ),\n        )\n        .drop(\"is_lead_linked\")\n    )\n    return self\n
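A minimal usage sketch, assuming study_locus is a StudyLocus instance with populated credibleSet arrays.

```python
study_locus = study_locus.clump()

# Flagged leads now have an empty credibleSet and the LD_CLUMPED quality
# flag appended to their qualityControls array.
study_locus.df.select("studyLocusId", "qualityControls").show(5, truncate=False)
```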
"},{"location":"components/dataset/study_locus/_study_locus/#otg.dataset.study_locus.StudyLocus.credible_set","title":"credible_set(credible_interval)","text":"

Filter study-locus tag variants based on given credible interval.

Parameters:

credible_interval (CredibleInterval): Credible interval to filter for. Required.

Returns:

StudyLocus: Filtered study-locus dataset.

Source code in src/otg/dataset/study_locus.py
def credible_set(\n    self: StudyLocus,\n    credible_interval: CredibleInterval,\n) -> StudyLocus:\n\"\"\"Filter study-locus tag variants based on given credible interval.\n\n    Args:\n        credible_interval (CredibleInterval): Credible interval to filter for.\n\n    Returns:\n        StudyLocus: Filtered study-locus dataset.\n    \"\"\"\n    self.df = self._df.withColumn(\n        \"credibleSet\",\n        f.expr(f\"filter(credibleSet, tag -> (tag.{credible_interval.value}))\"),\n    )\n    return self\n
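A minimal usage sketch; the import location of CredibleInterval is an assumption, and study_locus is assumed to be a StudyLocus instance that has already been through annotate_credible_sets.

```python
from otg.dataset.study_locus import CredibleInterval  # assumed module path

# Keep only tag variants flagged as part of the 95% credible set.
cs95 = study_locus.credible_set(CredibleInterval.IS95)
cs95.df.select("studyLocusId", "credibleSet").show(5, truncate=False)
```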
"},{"location":"components/dataset/study_locus/_study_locus/#otg.dataset.study_locus.StudyLocus.from_parquet","title":"from_parquet(session, path) classmethod","text":"

Initialise StudyLocus from parquet file.

Parameters:

session (Session): Spark session. Required.

path (str): Path to parquet file. Required.

Returns:

StudyLocus: Study-locus dataset.

Source code in src/otg/dataset/study_locus.py
@classmethod\ndef from_parquet(cls: type[StudyLocus], session: Session, path: str) -> StudyLocus:\n\"\"\"Initialise StudyLocus from parquet file.\n\n    Args:\n        session (Session): spark session\n        path (str): Path to parquet file\n\n    Returns:\n        StudyLocus: Study-locus dataset\n    \"\"\"\n    df = session.read_parquet(path=path, schema=cls._schema)\n    return cls(_df=df, _schema=cls._schema)\n
"},{"location":"components/dataset/study_locus/_study_locus/#otg.dataset.study_locus.StudyLocus.neglog_pvalue","title":"neglog_pvalue()","text":"

Returns the negative log p-value.

Returns:

Column: Negative log p-value.

Source code in src/otg/dataset/study_locus.py
def neglog_pvalue(self: StudyLocus) -> Column:\n\"\"\"Returns the negative log p-value.\n\n    Returns:\n        Column: Negative log p-value\n    \"\"\"\n    return calculate_neglog_pvalue(\n        self.df.pValueMantissa,\n        self.df.pValueExponent,\n    )\n
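Since the p-value is stored as mantissa × 10^exponent, the negative log p-value follows directly from those two columns; a small arithmetic sketch, assuming calculate_neglog_pvalue computes -log10 of that product.

```python
from math import log10

# p = mantissa * 10**exponent, so -log10(p) = -(log10(mantissa) + exponent).
p_value_mantissa, p_value_exponent = 5.0, -8  # p = 5e-8
neglog_p = -(log10(p_value_mantissa) + p_value_exponent)
print(round(neglog_p, 3))  # 7.301
```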
"},{"location":"components/dataset/study_locus/_study_locus/#otg.dataset.study_locus.StudyLocus.overlaps","title":"overlaps(study_index)","text":"

Calculate overlapping study-locus.

Find overlapping study-locus pairs that share at least one tagging variant. All GWAS-GWAS and GWAS-molecular trait overlaps are computed, with the molecular traits always appearing on the right side.

Parameters:

study_index (StudyIndex): Study index to resolve study types. Required.

Returns:

StudyLocusOverlap: Pairs of overlapping study-locus with aligned tags.

Source code in src/otg/dataset/study_locus.py
def overlaps(self: StudyLocus, study_index: StudyIndex) -> StudyLocusOverlap:\n\"\"\"Calculate overlapping study-locus.\n\n    Find overlapping study-locus that share at least one tagging variant. All GWAS-GWAS and all GWAS-Molecular traits are computed with the Molecular traits always\n    appearing on the right side.\n\n    Args:\n        study_index (StudyIndex): Study index to resolve study types.\n\n    Returns:\n        StudyLocusOverlap: Pairs of overlapping study-locus with aligned tags.\n    \"\"\"\n    credset_to_overlap = (\n        self.df.join(study_index.study_type_lut(), on=\"studyId\", how=\"inner\")\n        .withColumn(\"credibleSet\", f.explode(\"credibleSet\"))\n        .select(\n            \"studyLocusId\",\n            \"studyType\",\n            \"chromosome\",\n            f.col(\"credibleSet.tagVariantId\").alias(\"tagVariantId\"),\n            f.col(\"credibleSet.logABF\").alias(\"logABF\"),\n            f.col(\"credibleSet.posteriorProbability\").alias(\"posteriorProbability\"),\n        )\n        .persist()\n    )\n\n    # overlapping study-locus\n    peak_overlaps = self._overlapping_peaks(credset_to_overlap)\n\n    # study-locus overlap by aligning overlapping variants\n    return self._align_overlapping_tags(credset_to_overlap, peak_overlaps)\n
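A minimal usage sketch, assuming study_locus is a StudyLocus and study_index a StudyIndex instance.

```python
overlaps = study_locus.overlaps(study_index)

# One row per tag variant shared between a GWAS locus (left) and an overlapping locus (right).
overlaps.df.select(
    "chromosome", "left_studyLocusId", "right_studyLocusId", "tagVariantId"
).show(5)
```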
"},{"location":"components/dataset/study_locus/_study_locus/#otg.dataset.study_locus.StudyLocus.unique_lead_tag_variants","title":"unique_lead_tag_variants()","text":"

All unique lead and tag variants contained in the StudyLocus dataframe.

Returns:

DataFrame: A dataframe containing variantId and chromosome columns.

Source code in src/otg/dataset/study_locus.py
def unique_lead_tag_variants(self: StudyLocus) -> DataFrame:\n\"\"\"All unique lead and tag variants contained in the `StudyLocus` dataframe.\n\n    Returns:\n        DataFrame: A dataframe containing `variantId` and `chromosome` columns.\n    \"\"\"\n    lead_tags = (\n        self.df.select(\n            f.col(\"variantId\"),\n            f.col(\"chromosome\"),\n            f.explode(\"credibleSet.tagVariantId\").alias(\"tagVariantId\"),\n        )\n        .repartition(\"chromosome\")\n        .persist()\n    )\n    return (\n        lead_tags.select(\"variantId\", \"chromosome\")\n        .union(\n            lead_tags.select(f.col(\"tagVariantId\").alias(\"variantId\"), \"chromosome\")\n        )\n        .distinct()\n    )\n
"},{"location":"components/dataset/study_locus/_study_locus/#otg.dataset.study_locus.StudyLocus.unique_study_locus_ancestries","title":"unique_study_locus_ancestries(studies)","text":"

All unique lead variants and ancestries contained in the StudyLocus.

Parameters:

studies (StudyIndexGWASCatalog): Metadata about studies in the StudyLocus. Required.

Returns:

DataFrame: unique ["variantId", "studyId", "gnomadPopulation", "chromosome", "relativeSampleSize"]

Note

This method is only available for GWAS Catalog studies.

Source code in src/otg/dataset/study_locus.py
def unique_study_locus_ancestries(\n    self: StudyLocus, studies: StudyIndexGWASCatalog\n) -> DataFrame:\n\"\"\"All unique lead variant and ancestries contained in the `StudyLocus`.\n\n    Args:\n        studies (StudyIndexGWASCatalog): Metadata about studies in the `StudyLocus`.\n\n    Returns:\n        DataFrame: unique [\"variantId\", \"studyId\", \"gnomadPopulation\", \"chromosome\", \"relativeSampleSize\"]\n\n    Note:\n        This method is only available for GWAS Catalog studies.\n    \"\"\"\n    return (\n        self.df.join(\n            studies.get_gnomad_ancestry_sample_sizes(), on=\"studyId\", how=\"left\"\n        )\n        .filter(f.col(\"position\").isNotNull())\n        .select(\n            \"variantId\",\n            \"chromosome\",\n            \"studyId\",\n            \"gnomadPopulation\",\n            \"relativeSampleSize\",\n        )\n        .distinct()\n    )\n
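A hedged usage sketch (instance names are placeholders; per the note above, this only applies to GWAS Catalog study-loci):

    # Hypothetical usage sketch: one row per lead variant, study and gnomAD ancestry
    ancestries = gwas_catalog_study_locus.unique_study_locus_ancestries(gwas_catalog_studies)
    ancestries.show(5)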
"},{"location":"components/dataset/study_locus/_study_locus/#schema","title":"Schema","text":"
root\n |-- studyLocusId: long (nullable = false)\n |-- variantId: string (nullable = false)\n |-- chromosome: string (nullable = true)\n |-- position: integer (nullable = true)\n |-- studyId: string (nullable = false)\n |-- beta: double (nullable = true)\n |-- oddsRatio: double (nullable = true)\n |-- oddsRatioConfidenceIntervalLower: double (nullable = true)\n |-- oddsRatioConfidenceIntervalUpper: double (nullable = true)\n |-- betaConfidenceIntervalLower: double (nullable = true)\n |-- betaConfidenceIntervalUpper: double (nullable = true)\n |-- pValueMantissa: float (nullable = true)\n |-- pValueExponent: integer (nullable = true)\n |-- effectAlleleFrequencyFromSource: double (nullable = true)\n |-- standardError: double (nullable = true)\n |-- subStudyDescription: string (nullable = true)\n |-- qualityControls: array (nullable = true)\n |    |-- element: string (containsNull = false)\n |-- finemappingMethod: string (nullable = true)\n |-- credibleSet: array (nullable = true)\n |    |-- element: struct (containsNull = true)\n |    |    |-- is95CredibleSet: boolean (nullable = true)\n |    |    |-- is99CredibleSet: boolean (nullable = true)\n |    |    |-- logABF: double (nullable = true)\n |    |    |-- posteriorProbability: double (nullable = true)\n |    |    |-- tagVariantId: string (nullable = true)\n |    |    |-- tagPValue: double (nullable = true)\n |    |    |-- tagPValueConditioned: double (nullable = true)\n |    |    |-- tagBeta: double (nullable = true)\n |    |    |-- tagStandardError: double (nullable = true)\n |    |    |-- tagBetaConditioned: double (nullable = true)\n |    |    |-- tagStandardErrorConditioned: double (nullable = true)\n |    |    |-- r2Overall: double (nullable = true)\n
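Because credibleSet is a nested array of structs, downstream code typically explodes it before working with individual tag variants (as the overlaps method above does). A minimal sketch, assuming a populated study_locus instance; the selected columns are illustrative:

    import pyspark.sql.functions as f

    # Hypothetical sketch: one row per (study-locus, tag variant) pair
    tags = (
        study_locus.df
        .select("studyLocusId", f.explode("credibleSet").alias("tag"))
        .select("studyLocusId", "tag.tagVariantId", "tag.posteriorProbability")
    )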
"},{"location":"components/dataset/study_locus/_study_locus/#study-locus-quality-controls","title":"Study-locus quality controls","text":"

Bases: Enum

Study-Locus quality control options listing concerns about the quality of the association.

Attributes:

SUBSIGNIFICANT_FLAG (str): p-value below significance threshold
NO_GENOMIC_LOCATION_FLAG (str): Incomplete genomic mapping
COMPOSITE_FLAG (str): Composite association due to variant x variant interactions
VARIANT_INCONSISTENCY_FLAG (str): Inconsistencies in the reported variants
NON_MAPPED_VARIANT_FLAG (str): Variant not mapped to GnomAd
PALINDROMIC_ALLELE_FLAG (str): Alleles are palindromic - cannot harmonize
AMBIGUOUS_STUDY (str): Association with ambiguous study
UNRESOLVED_LD (str): Variant not found in LD reference
LD_CLUMPED (str): Explained by a more significant variant in high LD (clumped)

Source code in src/otg/dataset/study_locus.py
class StudyLocusQualityCheck(Enum):\n\"\"\"Study-Locus quality control options listing concerns on the quality of the association.\n\n    Attributes:\n        SUBSIGNIFICANT_FLAG (str): p-value below significance threshold\n        NO_GENOMIC_LOCATION_FLAG (str): Incomplete genomic mapping\n        COMPOSITE_FLAG (str): Composite association due to variant x variant interactions\n        VARIANT_INCONSISTENCY_FLAG (str): Inconsistencies in the reported variants\n        NON_MAPPED_VARIANT_FLAG (str): Variant not mapped to GnomAd\n        PALINDROMIC_ALLELE_FLAG (str): Alleles are palindromic - cannot harmonize\n        AMBIGUOUS_STUDY (str): Association with ambiguous study\n        UNRESOLVED_LD (str): Variant not found in LD reference\n        LD_CLUMPED (str): Explained by a more significant variant in high LD (clumped)\n    \"\"\"\n\n    SUBSIGNIFICANT_FLAG = \"Subsignificant p-value\"\n    NO_GENOMIC_LOCATION_FLAG = \"Incomplete genomic mapping\"\n    COMPOSITE_FLAG = \"Composite association\"\n    INCONSISTENCY_FLAG = \"Variant inconsistency\"\n    NON_MAPPED_VARIANT_FLAG = \"No mapping in GnomAd\"\n    PALINDROMIC_ALLELE_FLAG = \"Palindrome alleles - cannot harmonize\"\n    AMBIGUOUS_STUDY = \"Association with ambiguous study\"\n    UNRESOLVED_LD = \"Variant not found in LD reference\"\n    LD_CLUMPED = \"Explained by a more significant variant in high LD (clumped)\"\n
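The flag values are written into the qualityControls array of the study-locus schema, so a common pattern is to filter on that array. A hedged sketch, assuming a populated study_locus instance:

    import pyspark.sql.functions as f

    # Hypothetical sketch: keep associations that raised no QC concern
    clean = study_locus.df.filter(f.size(f.col("qualityControls")) == 0)

    # Hypothetical sketch: exclude a single specific flag
    no_palindromes = study_locus.df.filter(
        ~f.array_contains("qualityControls", StudyLocusQualityCheck.PALINDROMIC_ALLELE_FLAG.value)
    )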
"},{"location":"components/dataset/study_locus/_study_locus/#credible-interval","title":"Credible interval","text":"

Bases: Enum

Credible interval enum.

Interval within which an unobserved parameter value falls with a particular probability.

Attributes:

IS95 (str): 95% credible interval
IS99 (str): 99% credible interval

Source code in src/otg/dataset/study_locus.py
class CredibleInterval(Enum):\n\"\"\"Credible interval enum.\n\n    Interval within which an unobserved parameter value falls with a particular probability.\n\n    Attributes:\n        IS95 (str): 95% credible interval\n        IS99 (str): 99% credible interval\n    \"\"\"\n\n    IS95 = \"is95CredibleSet\"\n    IS99 = \"is99CredibleSet\"\n
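The enum values are the boolean field names inside each credibleSet element, so they can be used to subset tag variants. A hedged sketch (study_locus is a placeholder instance; f.filter on array columns requires Spark 3.1+):

    import pyspark.sql.functions as f

    # Hypothetical sketch: keep only tags flagged as part of the 95% credible set
    study_locus_95 = study_locus.df.withColumn(
        "credibleSet",
        f.filter("credibleSet", lambda tag: tag[CredibleInterval.IS95.value]),
    )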
"},{"location":"components/dataset/study_locus/study_locus_gwas_catalog/","title":"Study locus gwas catalog","text":"

Bases: StudyLocus

Study-locus dataset derived from GWAS Catalog.

Source code in src/otg/dataset/study_locus.py
class StudyLocusGWASCatalog(StudyLocus):\n\"\"\"Study-locus dataset derived from GWAS Catalog.\"\"\"\n\n    @staticmethod\n    def _parse_pvalue(pvalue: Column) -> tuple[Column, Column]:\n\"\"\"Parse p-value column.\n\n        Args:\n            pvalue (Column): p-value [string]\n\n        Returns:\n            tuple[Column, Column]: p-value mantissa and exponent\n\n        Example:\n            >>> import pyspark.sql.types as t\n            >>> d = [(\"1.0\"), (\"0.5\"), (\"1E-20\"), (\"3E-3\"), (\"1E-1000\")]\n            >>> df = spark.createDataFrame(d, t.StringType())\n            >>> df.select('value',*StudyLocusGWASCatalog._parse_pvalue(f.col('value'))).show()\n            +-------+--------------+--------------+\n            |  value|pValueMantissa|pValueExponent|\n            +-------+--------------+--------------+\n            |    1.0|           1.0|             1|\n            |    0.5|           0.5|             1|\n            |  1E-20|           1.0|           -20|\n            |   3E-3|           3.0|            -3|\n            |1E-1000|           1.0|         -1000|\n            +-------+--------------+--------------+\n            <BLANKLINE>\n\n        \"\"\"\n        split = f.split(pvalue, \"E\")\n        return split.getItem(0).cast(\"float\").alias(\"pValueMantissa\"), f.coalesce(\n            split.getItem(1).cast(\"integer\"), f.lit(1)\n        ).alias(\"pValueExponent\")\n\n    @staticmethod\n    def _normalise_pvaluetext(p_value_text: Column) -> Column:\n\"\"\"Normalised p-value text column to a standardised format.\n\n        For cases where there is no mapping, the value is set to null.\n\n        Args:\n            p_value_text (Column): `pValueText` column from GWASCatalog\n\n        Returns:\n            Column: Array column after using GWAS Catalog mappings. 
There might be multiple mappings for a single p-value text.\n\n        Example:\n            >>> import pyspark.sql.types as t\n            >>> d = [(\"European Ancestry\"), (\"African ancestry\"), (\"Alzheimer\u2019s Disease\"), (\"(progression)\"), (\"\"), (None)]\n            >>> df = spark.createDataFrame(d, t.StringType())\n            >>> df.withColumn('normalised', StudyLocusGWASCatalog._normalise_pvaluetext(f.col('value'))).show()\n            +-------------------+----------+\n            |              value|normalised|\n            +-------------------+----------+\n            |  European Ancestry|      [EA]|\n            |   African ancestry|      [AA]|\n            |Alzheimer\u2019s Disease|      [AD]|\n            |      (progression)|      null|\n            |                   |      null|\n            |               null|      null|\n            +-------------------+----------+\n            <BLANKLINE>\n\n        \"\"\"\n        # GWAS Catalog to p-value mapping\n        json_dict = json.loads(\n            pkg_resources.read_text(data, \"gwas_pValueText_map.json\", encoding=\"utf-8\")\n        )\n        map_expr = f.create_map(*[f.lit(x) for x in chain(*json_dict.items())])\n\n        splitted_col = f.split(f.regexp_replace(p_value_text, r\"[\\(\\)]\", \"\"), \",\")\n        mapped_col = f.transform(splitted_col, lambda x: map_expr[x])\n        return f.when(f.forall(mapped_col, lambda x: x.isNull()), None).otherwise(\n            mapped_col\n        )\n\n    @staticmethod\n    def _normalise_risk_allele(risk_allele: Column) -> Column:\n\"\"\"Normalised risk allele column to a standardised format.\n\n        If multiple risk alleles are present, the first one is returned.\n\n        Args:\n            risk_allele (Column): `riskAllele` column from GWASCatalog\n\n        Returns:\n            Column: mapped using GWAS Catalog mapping\n\n        Example:\n            >>> import pyspark.sql.types as t\n            >>> d = [(\"rs1234-A-G\"), (\"rs1234-A\"), (\"rs1234-A; rs1235-G\")]\n            >>> df = spark.createDataFrame(d, t.StringType())\n            >>> df.withColumn('normalised', StudyLocusGWASCatalog._normalise_risk_allele(f.col('value'))).show()\n            +------------------+----------+\n            |             value|normalised|\n            +------------------+----------+\n            |        rs1234-A-G|         A|\n            |          rs1234-A|         A|\n            |rs1234-A; rs1235-G|         A|\n            +------------------+----------+\n            <BLANKLINE>\n\n        \"\"\"\n        # GWAS Catalog to risk allele mapping\n        return f.split(f.split(risk_allele, \"; \").getItem(0), \"-\").getItem(1)\n\n    @staticmethod\n    def _collect_rsids(\n        snp_id: Column, snp_id_current: Column, risk_allele: Column\n    ) -> Column:\n\"\"\"It takes three columns, and returns an array of distinct values from those columns.\n\n        Args:\n            snp_id (Column): The original snp id from the GWAS catalog.\n            snp_id_current (Column): The current snp id field is just a number at the moment (stored as a string). Adding 'rs' prefix if looks good.\n            risk_allele (Column): The risk allele for the SNP.\n\n        Returns:\n            An array of distinct values.\n        \"\"\"\n        # The current snp id field is just a number at the moment (stored as a string). 
Adding 'rs' prefix if looks good.\n        snp_id_current = f.when(\n            snp_id_current.rlike(\"^[0-9]*$\"),\n            f.format_string(\"rs%s\", snp_id_current),\n        )\n        # Cleaning risk allele:\n        risk_allele = f.split(risk_allele, \"-\").getItem(0)\n\n        # Collecting all values:\n        return f.array_distinct(f.array(snp_id, snp_id_current, risk_allele))\n\n    @staticmethod\n    def _map_to_variant_annotation_variants(\n        gwas_associations: DataFrame, variant_annotation: VariantAnnotation\n    ) -> DataFrame:\n\"\"\"Add variant metadata in associations.\n\n        Args:\n            gwas_associations (DataFrame): raw GWAS Catalog associations\n            variant_annotation (VariantAnnotation): variant annotation dataset\n\n        Returns:\n            DataFrame: GWAS Catalog associations data including `variantId`, `referenceAllele`,\n            `alternateAllele`, `chromosome`, `position` with variant metadata\n        \"\"\"\n        # Subset of GWAS Catalog associations required for resolving variant IDs:\n        gwas_associations_subset = gwas_associations.select(\n            \"studyLocusId\",\n            f.col(\"CHR_ID\").alias(\"chromosome\"),\n            f.col(\"CHR_POS\").cast(IntegerType()).alias(\"position\"),\n            # List of all SNPs associated with the variant\n            StudyLocusGWASCatalog._collect_rsids(\n                f.split(f.col(\"SNPS\"), \"; \").getItem(0),\n                f.col(\"SNP_ID_CURRENT\"),\n                f.split(f.col(\"STRONGEST SNP-RISK ALLELE\"), \"; \").getItem(0),\n            ).alias(\"rsIdsGwasCatalog\"),\n            StudyLocusGWASCatalog._normalise_risk_allele(\n                f.col(\"STRONGEST SNP-RISK ALLELE\")\n            ).alias(\"riskAllele\"),\n        )\n\n        # Subset of variant annotation required for GWAS Catalog annotations:\n        va_subset = variant_annotation.df.select(\n            \"variantId\",\n            \"chromosome\",\n            \"position\",\n            f.col(\"rsIds\").alias(\"rsIdsGnomad\"),\n            \"referenceAllele\",\n            \"alternateAllele\",\n            \"alleleFrequencies\",\n            variant_annotation.max_maf().alias(\"maxMaf\"),\n        ).join(\n            f.broadcast(\n                gwas_associations_subset.select(\"chromosome\", \"position\").distinct()\n            ),\n            on=[\"chromosome\", \"position\"],\n            how=\"inner\",\n        )\n\n        # Semi-resolved ids (still contains duplicates when conclusion was not possible to make\n        # based on rsIds or allele concordance)\n        filtered_associations = (\n            gwas_associations_subset.join(\n                f.broadcast(va_subset),\n                on=[\"chromosome\", \"position\"],\n                how=\"left\",\n            )\n            .withColumn(\n                \"rsIdFilter\",\n                StudyLocusGWASCatalog._flag_mappings_to_retain(\n                    f.col(\"studyLocusId\"),\n                    StudyLocusGWASCatalog._compare_rsids(\n                        f.col(\"rsIdsGnomad\"), f.col(\"rsIdsGwasCatalog\")\n                    ),\n                ),\n            )\n            .withColumn(\n                \"concordanceFilter\",\n                StudyLocusGWASCatalog._flag_mappings_to_retain(\n                    f.col(\"studyLocusId\"),\n                    StudyLocusGWASCatalog._check_concordance(\n                        f.col(\"riskAllele\"),\n                        f.col(\"referenceAllele\"),\n               
         f.col(\"alternateAllele\"),\n                    ),\n                ),\n            )\n            .filter(\n                # Filter out rows where GWAS Catalog rsId does not match with GnomAD rsId,\n                # but there is corresponding variant for the same association\n                f.col(\"rsIdFilter\")\n                # or filter out rows where GWAS Catalog alleles are not concordant with GnomAD alleles,\n                # but there is corresponding variant for the same association\n                | f.col(\"concordanceFilter\")\n            )\n        )\n\n        # Keep only highest maxMaf variant per studyLocusId\n        fully_mapped_associations = get_record_with_maximum_value(\n            filtered_associations, grouping_col=\"studyLocusId\", sorting_col=\"maxMaf\"\n        ).select(\n            \"studyLocusId\",\n            \"variantId\",\n            \"referenceAllele\",\n            \"alternateAllele\",\n            \"chromosome\",\n            \"position\",\n        )\n\n        return gwas_associations.join(\n            fully_mapped_associations, on=\"studyLocusId\", how=\"left\"\n        )\n\n    @staticmethod\n    def _compare_rsids(gnomad: Column, gwas: Column) -> Column:\n\"\"\"If the intersection of the two arrays is greater than 0, return True, otherwise return False.\n\n        Args:\n            gnomad (Column): rsids from gnomad\n            gwas (Column): rsids from the GWAS Catalog\n\n        Returns:\n            A boolean column that is true if the GnomAD rsIDs can be found in the GWAS rsIDs.\n\n        Examples:\n            >>> d = [\n            ...    (1, [\"rs123\", \"rs523\"], [\"rs123\"]),\n            ...    (2, [], [\"rs123\"]),\n            ...    (3, [\"rs123\", \"rs523\"], []),\n            ...    (4, [], []),\n            ... ]\n            >>> df = spark.createDataFrame(d, ['associationId', 'gnomad', 'gwas'])\n            >>> df.withColumn(\"rsid_matches\", StudyLocusGWASCatalog._compare_rsids(f.col(\"gnomad\"),f.col('gwas'))).show()\n            +-------------+--------------+-------+------------+\n            |associationId|        gnomad|   gwas|rsid_matches|\n            +-------------+--------------+-------+------------+\n            |            1|[rs123, rs523]|[rs123]|        true|\n            |            2|            []|[rs123]|       false|\n            |            3|[rs123, rs523]|     []|       false|\n            |            4|            []|     []|       false|\n            +-------------+--------------+-------+------------+\n            <BLANKLINE>\n\n        \"\"\"\n        return f.when(f.size(f.array_intersect(gnomad, gwas)) > 0, True).otherwise(\n            False\n        )\n\n    @staticmethod\n    def _flag_mappings_to_retain(\n        association_id: Column, filter_column: Column\n    ) -> Column:\n\"\"\"Flagging mappings to drop for each association.\n\n        Some associations have multiple mappings. Some has matching rsId others don't. We only\n        want to drop the non-matching mappings, when a matching is available for the given association.\n        This logic can be generalised for other measures eg. allele concordance.\n\n        Args:\n            association_id (Column): association identifier column\n            filter_column (Column): boolean col indicating to keep a mapping\n\n        Returns:\n            A column with a boolean value.\n\n        Examples:\n        >>> d = [\n        ...    (1, False),\n        ...    (1, False),\n        ...    (2, False),\n        ...    
(2, True),\n        ...    (3, True),\n        ...    (3, True),\n        ... ]\n        >>> df = spark.createDataFrame(d, ['associationId', 'filter'])\n        >>> df.withColumn(\"isConcordant\", StudyLocusGWASCatalog._flag_mappings_to_retain(f.col(\"associationId\"),f.col('filter'))).show()\n        +-------------+------+------------+\n        |associationId|filter|isConcordant|\n        +-------------+------+------------+\n        |            1| false|        true|\n        |            1| false|        true|\n        |            2| false|       false|\n        |            2|  true|        true|\n        |            3|  true|        true|\n        |            3|  true|        true|\n        +-------------+------+------------+\n        <BLANKLINE>\n\n        \"\"\"\n        w = Window.partitionBy(association_id)\n\n        # Generating a boolean column informing if the filter column contains true anywhere for the association:\n        aggregated_filter = f.when(\n            f.array_contains(f.collect_set(filter_column).over(w), True), True\n        ).otherwise(False)\n\n        # Generate a filter column:\n        return f.when(aggregated_filter & (~filter_column), False).otherwise(True)\n\n    @staticmethod\n    def _check_concordance(\n        risk_allele: Column, reference_allele: Column, alternate_allele: Column\n    ) -> Column:\n\"\"\"A function to check if the risk allele is concordant with the alt or ref allele.\n\n        If the risk allele is the same as the reference or alternate allele, or if the reverse complement of\n        the risk allele is the same as the reference or alternate allele, then the allele is concordant.\n        If no mapping is available (ref/alt is null), the function returns True.\n\n        Args:\n            risk_allele (Column): The allele that is associated with the risk of the disease.\n            reference_allele (Column): The reference allele from the GWAS catalog\n            alternate_allele (Column): The alternate allele of the variant.\n\n        Returns:\n            A boolean column that is True if the risk allele is the same as the reference or alternate allele,\n            or if the reverse complement of the risk allele is the same as the reference or alternate allele.\n\n        Examples:\n            >>> d = [\n            ...     ('A', 'A', 'G'),\n            ...     ('A', 'T', 'G'),\n            ...     ('A', 'C', 'G'),\n            ...     ('A', 'A', '?'),\n            ...     (None, None, 'A'),\n            ... 
]\n            >>> df = spark.createDataFrame(d, ['riskAllele', 'referenceAllele', 'alternateAllele'])\n            >>> df.withColumn(\"isConcordant\", StudyLocusGWASCatalog._check_concordance(f.col(\"riskAllele\"),f.col('referenceAllele'), f.col('alternateAllele'))).show()\n            +----------+---------------+---------------+------------+\n            |riskAllele|referenceAllele|alternateAllele|isConcordant|\n            +----------+---------------+---------------+------------+\n            |         A|              A|              G|        true|\n            |         A|              T|              G|        true|\n            |         A|              C|              G|       false|\n            |         A|              A|              ?|        true|\n            |      null|           null|              A|        true|\n            +----------+---------------+---------------+------------+\n            <BLANKLINE>\n\n        \"\"\"\n        # Calculating the reverse complement of the risk allele:\n        risk_allele_reverse_complement = f.when(\n            risk_allele.rlike(r\"^[ACTG]+$\"),\n            f.reverse(f.translate(risk_allele, \"ACTG\", \"TGAC\")),\n        ).otherwise(risk_allele)\n\n        # OK, is the risk allele or the reverse complent is the same as the mapped alleles:\n        return (\n            f.when(\n                (risk_allele == reference_allele) | (risk_allele == alternate_allele),\n                True,\n            )\n            # If risk allele is found on the negative strand:\n            .when(\n                (risk_allele_reverse_complement == reference_allele)\n                | (risk_allele_reverse_complement == alternate_allele),\n                True,\n            )\n            # If risk allele is ambiguous, still accepted: < This condition could be reconsidered\n            .when(risk_allele == \"?\", True)\n            # If the association could not be mapped we keep it:\n            .when(reference_allele.isNull(), True)\n            # Allele is discordant:\n            .otherwise(False)\n        )\n\n    @staticmethod\n    def _get_reverse_complement(allele_col: Column) -> Column:\n\"\"\"A function to return the reverse complement of an allele column.\n\n        It takes a string and returns the reverse complement of that string if it's a DNA sequence,\n        otherwise it returns the original string. 
Assumes alleles in upper case.\n\n        Args:\n            allele_col (Column): The column containing the allele to reverse complement.\n\n        Returns:\n            A column that is the reverse complement of the allele column.\n\n        Examples:\n            >>> d = [{\"allele\": 'A'}, {\"allele\": 'T'},{\"allele\": 'G'}, {\"allele\": 'C'},{\"allele\": 'AC'}, {\"allele\": 'GTaatc'},{\"allele\": '?'}, {\"allele\": None}]\n            >>> df = spark.createDataFrame(d)\n            >>> df.withColumn(\"revcom_allele\", StudyLocusGWASCatalog._get_reverse_complement(f.col(\"allele\"))).show()\n            +------+-------------+\n            |allele|revcom_allele|\n            +------+-------------+\n            |     A|            T|\n            |     T|            A|\n            |     G|            C|\n            |     C|            G|\n            |    AC|           GT|\n            |GTaatc|       GATTAC|\n            |     ?|            ?|\n            |  null|         null|\n            +------+-------------+\n            <BLANKLINE>\n\n        \"\"\"\n        allele_col = f.upper(allele_col)\n        return f.when(\n            allele_col.rlike(\"[ACTG]+\"),\n            f.reverse(f.translate(allele_col, \"ACTG\", \"TGAC\")),\n        ).otherwise(allele_col)\n\n    @staticmethod\n    def _effect_needs_harmonisation(\n        risk_allele: Column, reference_allele: Column\n    ) -> Column:\n\"\"\"A function to check if the effect allele needs to be harmonised.\n\n        Args:\n            risk_allele (Column): Risk allele column\n            reference_allele (Column): Effect allele column\n\n        Returns:\n            A boolean column indicating if the effect allele needs to be harmonised.\n\n        Examples:\n            >>> d = [{\"risk\": 'A', \"reference\": 'A'}, {\"risk\": 'A', \"reference\": 'T'}, {\"risk\": 'AT', \"reference\": 'TA'}, {\"risk\": 'AT', \"reference\": 'AT'}]\n            >>> df = spark.createDataFrame(d)\n            >>> df.withColumn(\"needs_harmonisation\", StudyLocusGWASCatalog._effect_needs_harmonisation(f.col(\"risk\"), f.col(\"reference\"))).show()\n            +---------+----+-------------------+\n            |reference|risk|needs_harmonisation|\n            +---------+----+-------------------+\n            |        A|   A|               true|\n            |        T|   A|               true|\n            |       TA|  AT|              false|\n            |       AT|  AT|               true|\n            +---------+----+-------------------+\n            <BLANKLINE>\n\n        \"\"\"\n        return (risk_allele == reference_allele) | (\n            risk_allele\n            == StudyLocusGWASCatalog._get_reverse_complement(reference_allele)\n        )\n\n    @staticmethod\n    def _are_alleles_palindromic(\n        reference_allele: Column, alternate_allele: Column\n    ) -> Column:\n\"\"\"A function to check if the alleles are palindromic.\n\n        Args:\n            reference_allele (Column): Reference allele column\n            alternate_allele (Column): Alternate allele column\n\n        Returns:\n            A boolean column indicating if the alleles are palindromic.\n\n        Examples:\n            >>> d = [{\"reference\": 'A', \"alternate\": 'T'}, {\"reference\": 'AT', \"alternate\": 'AG'}, {\"reference\": 'AT', \"alternate\": 'AT'}, {\"reference\": 'CATATG', \"alternate\": 'CATATG'}, {\"reference\": '-', \"alternate\": None}]\n            >>> df = spark.createDataFrame(d)\n            >>> df.withColumn(\"is_palindromic\", 
StudyLocusGWASCatalog._are_alleles_palindromic(f.col(\"reference\"), f.col(\"alternate\"))).show()\n            +---------+---------+--------------+\n            |alternate|reference|is_palindromic|\n            +---------+---------+--------------+\n            |        T|        A|          true|\n            |       AG|       AT|         false|\n            |       AT|       AT|          true|\n            |   CATATG|   CATATG|          true|\n            |     null|        -|         false|\n            +---------+---------+--------------+\n            <BLANKLINE>\n\n        \"\"\"\n        revcomp = StudyLocusGWASCatalog._get_reverse_complement(alternate_allele)\n        return (\n            f.when(reference_allele == revcomp, True)\n            .when(revcomp.isNull(), False)\n            .otherwise(False)\n        )\n\n    @staticmethod\n    def _harmonise_beta(\n        risk_allele: Column,\n        reference_allele: Column,\n        alternate_allele: Column,\n        effect_size: Column,\n        confidence_interval: Column,\n    ) -> Column:\n\"\"\"A function to extract the beta value from the effect size and confidence interval.\n\n        If the confidence interval contains the word \"increase\" or \"decrease\" it indicates, we are dealing with betas.\n        If it's \"increase\" and the effect size needs to be harmonized, then multiply the effect size by -1\n\n        Args:\n            risk_allele (Column): Risk allele column\n            reference_allele (Column): Reference allele column\n            alternate_allele (Column): Alternate allele column\n            effect_size (Column): GWAS Catalog effect size column\n            confidence_interval (Column): GWAS Catalog confidence interval column\n\n        Returns:\n            A column containing the beta value.\n        \"\"\"\n        return (\n            f.when(\n                StudyLocusGWASCatalog._are_alleles_palindromic(\n                    reference_allele, alternate_allele\n                ),\n                None,\n            )\n            .when(\n                (\n                    StudyLocusGWASCatalog._effect_needs_harmonisation(\n                        risk_allele, reference_allele\n                    )\n                    & confidence_interval.contains(\"increase\")\n                )\n                | (\n                    ~StudyLocusGWASCatalog._effect_needs_harmonisation(\n                        risk_allele, reference_allele\n                    )\n                    & confidence_interval.contains(\"decrease\")\n                ),\n                -effect_size,\n            )\n            .otherwise(effect_size)\n            .cast(DoubleType())\n        )\n\n    @staticmethod\n    def _harmonise_beta_ci(\n        risk_allele: Column,\n        reference_allele: Column,\n        alternate_allele: Column,\n        effect_size: Column,\n        confidence_interval: Column,\n        p_value: Column,\n        direction: str,\n    ) -> Column:\n\"\"\"Calculating confidence intervals for beta values.\n\n        Args:\n            risk_allele (Column): Risk allele column\n            reference_allele (Column): Reference allele column\n            alternate_allele (Column): Alternate allele column\n            effect_size (Column): GWAS Catalog effect size column\n            confidence_interval (Column): GWAS Catalog confidence interval column\n            p_value (Column): GWAS Catalog p-value column\n            direction (str): This is the direction of the confidence interval. 
It can be either \"upper\" or \"lower\".\n\n        Returns:\n            The upper and lower bounds of the confidence interval for the beta coefficient.\n        \"\"\"\n        zscore_95 = f.lit(1.96)\n        beta = StudyLocusGWASCatalog._harmonise_beta(\n            risk_allele,\n            reference_allele,\n            alternate_allele,\n            effect_size,\n            confidence_interval,\n        )\n        zscore = pvalue_to_zscore(p_value)\n        return (\n            f.when(f.lit(direction) == \"upper\", beta + f.abs(zscore_95 * beta) / zscore)\n            .when(f.lit(direction) == \"lower\", beta - f.abs(zscore_95 * beta) / zscore)\n            .otherwise(None)\n        )\n\n    @staticmethod\n    def _harmonise_odds_ratio(\n        risk_allele: Column,\n        reference_allele: Column,\n        alternate_allele: Column,\n        effect_size: Column,\n        confidence_interval: Column,\n    ) -> Column:\n\"\"\"Harmonizing odds ratio.\n\n        Args:\n            risk_allele (Column): Risk allele column\n            reference_allele (Column): Reference allele column\n            alternate_allele (Column): Alternate allele column\n            effect_size (Column): GWAS Catalog effect size column\n            confidence_interval (Column): GWAS Catalog confidence interval column\n\n        Returns:\n            A column with the odds ratio, or 1/odds_ratio if harmonization required.\n        \"\"\"\n        return (\n            f.when(\n                StudyLocusGWASCatalog._are_alleles_palindromic(\n                    reference_allele, alternate_allele\n                ),\n                None,\n            )\n            .when(\n                (\n                    StudyLocusGWASCatalog._effect_needs_harmonisation(\n                        risk_allele, reference_allele\n                    )\n                    & ~confidence_interval.rlike(\"|\".join([\"decrease\", \"increase\"]))\n                ),\n                1 / effect_size,\n            )\n            .otherwise(effect_size)\n            .cast(DoubleType())\n        )\n\n    @staticmethod\n    def _harmonise_odds_ratio_ci(\n        risk_allele: Column,\n        reference_allele: Column,\n        alternate_allele: Column,\n        effect_size: Column,\n        confidence_interval: Column,\n        p_value: Column,\n        direction: str,\n    ) -> Column:\n\"\"\"Calculating confidence intervals for beta values.\n\n        Args:\n            risk_allele (Column): Risk allele column\n            reference_allele (Column): Reference allele column\n            alternate_allele (Column): Alternate allele column\n            effect_size (Column): GWAS Catalog effect size column\n            confidence_interval (Column): GWAS Catalog confidence interval column\n            p_value (Column): GWAS Catalog p-value column\n            direction (str): This is the direction of the confidence interval. 
It can be either \"upper\" or \"lower\".\n\n        Returns:\n            The upper and lower bounds of the 95% confidence interval for the odds ratio.\n        \"\"\"\n        zscore_95 = f.lit(1.96)\n        odds_ratio = StudyLocusGWASCatalog._harmonise_odds_ratio(\n            risk_allele,\n            reference_allele,\n            alternate_allele,\n            effect_size,\n            confidence_interval,\n        )\n        odds_ratio_estimate = f.log(odds_ratio)\n        zscore = pvalue_to_zscore(p_value)\n        odds_ratio_se = odds_ratio_estimate / zscore\n        return f.when(\n            f.lit(direction) == \"upper\",\n            f.exp(odds_ratio_estimate + f.abs(zscore_95 * odds_ratio_se)),\n        ).when(\n            f.lit(direction) == \"lower\",\n            f.exp(odds_ratio_estimate - f.abs(zscore_95 * odds_ratio_se)),\n        )\n\n    @staticmethod\n    def _concatenate_substudy_description(\n        association_trait: Column, pvalue_text: Column, mapped_trait_uri: Column\n    ) -> Column:\n\"\"\"Substudy description parsing. Complex string containing metadata about the substudy (e.g. QTL, specific EFO, etc.).\n\n        Args:\n            association_trait (Column): GWAS Catalog association trait column\n            pvalue_text (Column): GWAS Catalog p-value text column\n            mapped_trait_uri (Column): GWAS Catalog mapped trait URI column\n\n        Returns:\n            A column with the substudy description in the shape trait|pvaluetext1_pvaluetext2|EFO1_EFO2.\n\n        Examples:\n        >>> df = spark.createDataFrame([\n        ...    (\"Height\", \"http://www.ebi.ac.uk/efo/EFO_0000001,http://www.ebi.ac.uk/efo/EFO_0000002\", \"European Ancestry\"),\n        ...    (\"Schizophrenia\", \"http://www.ebi.ac.uk/efo/MONDO_0005090\", None)],\n        ...    [\"association_trait\", \"mapped_trait_uri\", \"pvalue_text\"]\n        ... 
)\n        >>> df.withColumn('substudy_description', StudyLocusGWASCatalog._concatenate_substudy_description(df.association_trait, df.pvalue_text, df.mapped_trait_uri)).show(truncate=False)\n        +-----------------+-------------------------------------------------------------------------+-----------------+------------------------------------------+\n        |association_trait|mapped_trait_uri                                                         |pvalue_text      |substudy_description                      |\n        +-----------------+-------------------------------------------------------------------------+-----------------+------------------------------------------+\n        |Height           |http://www.ebi.ac.uk/efo/EFO_0000001,http://www.ebi.ac.uk/efo/EFO_0000002|European Ancestry|Height|EA|EFO_0000001/EFO_0000002         |\n        |Schizophrenia    |http://www.ebi.ac.uk/efo/MONDO_0005090                                   |null             |Schizophrenia|no_pvalue_text|MONDO_0005090|\n        +-----------------+-------------------------------------------------------------------------+-----------------+------------------------------------------+\n        <BLANKLINE>\n        \"\"\"\n        p_value_text = f.coalesce(\n            StudyLocusGWASCatalog._normalise_pvaluetext(pvalue_text),\n            f.array(f.lit(\"no_pvalue_text\")),\n        )\n        return f.concat_ws(\n            \"|\",\n            association_trait,\n            f.concat_ws(\n                \"/\",\n                p_value_text,\n            ),\n            f.concat_ws(\n                \"/\",\n                parse_efos(mapped_trait_uri),\n            ),\n        )\n\n    @staticmethod\n    def _qc_all(\n        qc: Column,\n        chromosome: Column,\n        position: Column,\n        reference_allele: Column,\n        alternate_allele: Column,\n        strongest_snp_risk_allele: Column,\n        p_value_mantissa: Column,\n        p_value_exponent: Column,\n        p_value_cutoff: float,\n    ) -> Column:\n\"\"\"Flag associations that fail any QC.\n\n        Args:\n            qc (Column): QC column\n            chromosome (Column): Chromosome column\n            position (Column): Position column\n            reference_allele (Column): Reference allele column\n            alternate_allele (Column): Alternate allele column\n            strongest_snp_risk_allele (Column): Strongest SNP risk allele column\n            p_value_mantissa (Column): P-value mantissa column\n            p_value_exponent (Column): P-value exponent column\n            p_value_cutoff (float): P-value cutoff\n\n        Returns:\n            Column: Updated QC column with flag.\n        \"\"\"\n        qc = StudyLocusGWASCatalog._qc_variant_interactions(\n            qc, strongest_snp_risk_allele\n        )\n        qc = StudyLocusGWASCatalog._qc_subsignificant_associations(\n            qc, p_value_mantissa, p_value_exponent, p_value_cutoff\n        )\n        qc = StudyLocusGWASCatalog._qc_genomic_location(qc, chromosome, position)\n        qc = StudyLocusGWASCatalog._qc_variant_inconsistencies(\n            qc, chromosome, position, strongest_snp_risk_allele\n        )\n        qc = StudyLocusGWASCatalog._qc_unmapped_variants(qc, alternate_allele)\n        qc = StudyLocusGWASCatalog._qc_palindromic_alleles(\n            qc, reference_allele, alternate_allele\n        )\n        return qc\n\n    @staticmethod\n    def _qc_variant_interactions(\n        qc: Column, strongest_snp_risk_allele: Column\n    ) -> Column:\n\"\"\"Flag 
associations based on variant x variant interactions.\n\n        Args:\n            qc (Column): QC column\n            strongest_snp_risk_allele (Column): Column with the strongest SNP risk allele\n\n        Returns:\n            Column: Updated QC column with flag.\n        \"\"\"\n        return StudyLocusGWASCatalog._update_quality_flag(\n            qc,\n            strongest_snp_risk_allele.contains(\";\"),\n            StudyLocusQualityCheck.COMPOSITE_FLAG,\n        )\n\n    @staticmethod\n    def _qc_subsignificant_associations(\n        qc: Column,\n        p_value_mantissa: Column,\n        p_value_exponent: Column,\n        pvalue_cutoff: float,\n    ) -> Column:\n\"\"\"Flag associations below significant threshold.\n\n        Args:\n            qc (Column): QC column\n            p_value_mantissa (Column): P-value mantissa column\n            p_value_exponent (Column): P-value exponent column\n            pvalue_cutoff (float): association p-value cut-off\n\n        Returns:\n            Column: Updated QC column with flag.\n\n        Examples:\n            >>> import pyspark.sql.types as t\n            >>> d = [{'qc': None, 'p_value_mantissa': 1, 'p_value_exponent': -7}, {'qc': None, 'p_value_mantissa': 1, 'p_value_exponent': -8}, {'qc': None, 'p_value_mantissa': 5, 'p_value_exponent': -8}, {'qc': None, 'p_value_mantissa': 1, 'p_value_exponent': -9}]\n            >>> df = spark.createDataFrame(d, t.StructType([t.StructField('qc', t.ArrayType(t.StringType()), True), t.StructField('p_value_mantissa', t.IntegerType()), t.StructField('p_value_exponent', t.IntegerType())]))\n            >>> df.withColumn('qc', StudyLocusGWASCatalog._qc_subsignificant_associations(f.col(\"qc\"), f.col(\"p_value_mantissa\"), f.col(\"p_value_exponent\"), 5e-8)).show(truncate = False)\n            +------------------------+----------------+----------------+\n            |qc                      |p_value_mantissa|p_value_exponent|\n            +------------------------+----------------+----------------+\n            |[Subsignificant p-value]|1               |-7              |\n            |[]                      |1               |-8              |\n            |[]                      |5               |-8              |\n            |[]                      |1               |-9              |\n            +------------------------+----------------+----------------+\n            <BLANKLINE>\n\n        \"\"\"\n        return StudyLocus._update_quality_flag(\n            qc,\n            calculate_neglog_pvalue(p_value_mantissa, p_value_exponent)\n            < f.lit(-np.log10(pvalue_cutoff)),\n            StudyLocusQualityCheck.SUBSIGNIFICANT_FLAG,\n        )\n\n    @staticmethod\n    def _qc_genomic_location(\n        qc: Column, chromosome: Column, position: Column\n    ) -> Column:\n\"\"\"Flag associations without genomic location in GWAS Catalog.\n\n        Args:\n            qc (Column): QC column\n            chromosome (Column): Chromosome column in GWAS Catalog\n            position (Column): Position column in GWAS Catalog\n\n        Returns:\n            Column: Updated QC column with flag.\n\n        Examples:\n            >>> import pyspark.sql.types as t\n            >>> d = [{'qc': None, 'chromosome': None, 'position': None}, {'qc': None, 'chromosome': '1', 'position': None}, {'qc': None, 'chromosome': None, 'position': 1}, {'qc': None, 'chromosome': '1', 'position': 1}]\n            >>> df = spark.createDataFrame(d, schema=t.StructType([t.StructField('qc', t.ArrayType(t.StringType()), 
True), t.StructField('chromosome', t.StringType()), t.StructField('position', t.IntegerType())]))\n            >>> df.withColumn('qc', StudyLocusGWASCatalog._qc_genomic_location(df.qc, df.chromosome, df.position)).show(truncate=False)\n            +----------------------------+----------+--------+\n            |qc                          |chromosome|position|\n            +----------------------------+----------+--------+\n            |[Incomplete genomic mapping]|null      |null    |\n            |[Incomplete genomic mapping]|1         |null    |\n            |[Incomplete genomic mapping]|null      |1       |\n            |[]                          |1         |1       |\n            +----------------------------+----------+--------+\n            <BLANKLINE>\n\n        \"\"\"\n        return StudyLocus._update_quality_flag(\n            qc,\n            position.isNull() | chromosome.isNull(),\n            StudyLocusQualityCheck.NO_GENOMIC_LOCATION_FLAG,\n        )\n\n    @staticmethod\n    def _qc_variant_inconsistencies(\n        qc: Column,\n        chromosome: Column,\n        position: Column,\n        strongest_snp_risk_allele: Column,\n    ) -> Column:\n\"\"\"Flag associations with inconsistencies in the variant annotation.\n\n        Args:\n            qc (Column): QC column\n            chromosome (Column): Chromosome column in GWAS Catalog\n            position (Column): Position column in GWAS Catalog\n            strongest_snp_risk_allele (Column): Strongest SNP risk allele column in GWAS Catalog\n\n        Returns:\n            Column: Updated QC column with flag.\n        \"\"\"\n        return StudyLocusGWASCatalog._update_quality_flag(\n            qc,\n            # Number of chromosomes does not correspond to the number of positions:\n            (f.size(f.split(chromosome, \";\")) != f.size(f.split(position, \";\")))\n            # Number of chromosome values different from riskAllele values:\n            | (\n                f.size(f.split(chromosome, \";\"))\n                != f.size(f.split(strongest_snp_risk_allele, \";\"))\n            ),\n            StudyLocusQualityCheck.INCONSISTENCY_FLAG,\n        )\n\n    @staticmethod\n    def _qc_unmapped_variants(qc: Column, alternate_allele: Column) -> Column:\n\"\"\"Flag associations with variants not mapped to variantAnnotation.\n\n        Args:\n            qc (Column): QC column\n            alternate_allele (Column): alternate allele\n\n        Returns:\n            Column: Updated QC column with flag.\n\n        Example:\n            >>> import pyspark.sql.types as t\n            >>> d = [{'alternate_allele': 'A', 'qc': None}, {'alternate_allele': None, 'qc': None}]\n            >>> schema = t.StructType([t.StructField('alternate_allele', t.StringType(), True), t.StructField('qc', t.ArrayType(t.StringType()), True)])\n            >>> df = spark.createDataFrame(data=d, schema=schema)\n            >>> df.withColumn(\"new_qc\", StudyLocusGWASCatalog._qc_unmapped_variants(f.col(\"qc\"), f.col(\"alternate_allele\"))).show()\n            +----------------+----+--------------------+\n            |alternate_allele|  qc|              new_qc|\n            +----------------+----+--------------------+\n            |               A|null|                  []|\n            |            null|null|[No mapping in Gn...|\n            +----------------+----+--------------------+\n            <BLANKLINE>\n\n        \"\"\"\n        return StudyLocus._update_quality_flag(\n            qc,\n            alternate_allele.isNull(),\n      
      StudyLocusQualityCheck.NON_MAPPED_VARIANT_FLAG,\n        )\n\n    @staticmethod\n    def _qc_palindromic_alleles(\n        qc: Column, reference_allele: Column, alternate_allele: Column\n    ) -> Column:\n\"\"\"Flag associations with palindromic variants which effects can not be harmonised.\n\n        Args:\n            qc (Column): QC column\n            reference_allele (Column): reference allele\n            alternate_allele (Column): alternate allele\n\n        Returns:\n            Column: Updated QC column with flag.\n\n        Example:\n            >>> import pyspark.sql.types as t\n            >>> schema = t.StructType([t.StructField('reference_allele', t.StringType(), True), t.StructField('alternate_allele', t.StringType(), True), t.StructField('qc', t.ArrayType(t.StringType()), True)])\n            >>> d = [{'reference_allele': 'A', 'alternate_allele': 'T', 'qc': None}, {'reference_allele': 'AT', 'alternate_allele': 'TA', 'qc': None}, {'reference_allele': 'AT', 'alternate_allele': 'AT', 'qc': None}]\n            >>> df = spark.createDataFrame(data=d, schema=schema)\n            >>> df.withColumn(\"qc\", StudyLocusGWASCatalog._qc_palindromic_alleles(f.col(\"qc\"), f.col(\"reference_allele\"), f.col(\"alternate_allele\"))).show(truncate=False)\n            +----------------+----------------+---------------------------------------+\n            |reference_allele|alternate_allele|qc                                     |\n            +----------------+----------------+---------------------------------------+\n            |A               |T               |[Palindrome alleles - cannot harmonize]|\n            |AT              |TA              |[]                                     |\n            |AT              |AT              |[Palindrome alleles - cannot harmonize]|\n            +----------------+----------------+---------------------------------------+\n            <BLANKLINE>\n\n        \"\"\"\n        return StudyLocus._update_quality_flag(\n            qc,\n            StudyLocusGWASCatalog._are_alleles_palindromic(\n                reference_allele, alternate_allele\n            ),\n            StudyLocusQualityCheck.PALINDROMIC_ALLELE_FLAG,\n        )\n\n    @classmethod\n    def from_source(\n        cls: type[StudyLocusGWASCatalog],\n        gwas_associations: DataFrame,\n        variant_annotation: VariantAnnotation,\n        pvalue_threshold: float = 5e-8,\n    ) -> StudyLocusGWASCatalog:\n\"\"\"Read GWASCatalog associations.\n\n        It reads the GWAS Catalog association dataset, selects and renames columns, casts columns, and\n        applies some pre-defined filters on the data:\n\n        Args:\n            gwas_associations (DataFrame): GWAS Catalog raw associations dataset\n            variant_annotation (VariantAnnotation): Variant annotation dataset\n            pvalue_threshold (float): P-value threshold for flagging associations\n\n        Returns:\n            StudyLocusGWASCatalog: StudyLocusGWASCatalog dataset\n        \"\"\"\n        return cls(\n            _df=gwas_associations.withColumn(\n                \"studyLocusId\", f.monotonically_increasing_id().cast(LongType())\n            )\n            .transform(\n                # Map/harmonise variants to variant annotation dataset:\n                # This function adds columns: variantId, referenceAllele, alternateAllele, chromosome, position\n                lambda df: StudyLocusGWASCatalog._map_to_variant_annotation_variants(\n                    df, variant_annotation\n                )\n      
      )\n            .withColumn(\n                # Perform all quality control checks:\n                \"qualityControls\",\n                StudyLocusGWASCatalog._qc_all(\n                    f.array().alias(\"qualityControls\"),\n                    f.col(\"CHR_ID\"),\n                    f.col(\"CHR_POS\").cast(IntegerType()),\n                    f.col(\"referenceAllele\"),\n                    f.col(\"alternateAllele\"),\n                    f.col(\"STRONGEST SNP-RISK ALLELE\"),\n                    *StudyLocusGWASCatalog._parse_pvalue(f.col(\"P-VALUE\")),\n                    pvalue_threshold,\n                ),\n            )\n            .select(\n                # INSIDE STUDY-LOCUS SCHEMA:\n                \"studyLocusId\",\n                \"variantId\",\n                # Mapped genomic location of the variant (; separated list)\n                \"chromosome\",\n                \"position\",\n                f.col(\"STUDY ACCESSION\").alias(\"studyId\"),\n                # beta value of the association\n                StudyLocusGWASCatalog._harmonise_beta(\n                    StudyLocusGWASCatalog._normalise_risk_allele(\n                        f.col(\"STRONGEST SNP-RISK ALLELE\")\n                    ),\n                    f.col(\"referenceAllele\"),\n                    f.col(\"alternateAllele\"),\n                    f.col(\"OR or BETA\"),\n                    f.col(\"95% CI (TEXT)\"),\n                ).alias(\"beta\"),\n                # odds ratio of the association\n                StudyLocusGWASCatalog._harmonise_odds_ratio(\n                    StudyLocusGWASCatalog._normalise_risk_allele(\n                        f.col(\"STRONGEST SNP-RISK ALLELE\")\n                    ),\n                    f.col(\"referenceAllele\"),\n                    f.col(\"alternateAllele\"),\n                    f.col(\"OR or BETA\"),\n                    f.col(\"95% CI (TEXT)\"),\n                ).alias(\"oddsRatio\"),\n                # CI lower of the beta value\n                StudyLocusGWASCatalog._harmonise_beta_ci(\n                    StudyLocusGWASCatalog._normalise_risk_allele(\n                        f.col(\"STRONGEST SNP-RISK ALLELE\")\n                    ),\n                    f.col(\"referenceAllele\"),\n                    f.col(\"alternateAllele\"),\n                    f.col(\"OR or BETA\"),\n                    f.col(\"95% CI (TEXT)\"),\n                    f.col(\"P-VALUE\"),\n                    \"lower\",\n                ).alias(\"betaConfidenceIntervalLower\"),\n                # CI upper for the beta value\n                StudyLocusGWASCatalog._harmonise_beta_ci(\n                    StudyLocusGWASCatalog._normalise_risk_allele(\n                        f.col(\"STRONGEST SNP-RISK ALLELE\")\n                    ),\n                    f.col(\"referenceAllele\"),\n                    f.col(\"alternateAllele\"),\n                    f.col(\"OR or BETA\"),\n                    f.col(\"95% CI (TEXT)\"),\n                    f.col(\"P-VALUE\"),\n                    \"upper\",\n                ).alias(\"betaConfidenceIntervalUpper\"),\n                # CI lower of the odds ratio value\n                StudyLocusGWASCatalog._harmonise_odds_ratio_ci(\n                    StudyLocusGWASCatalog._normalise_risk_allele(\n                        f.col(\"STRONGEST SNP-RISK ALLELE\")\n                    ),\n                    f.col(\"referenceAllele\"),\n                    f.col(\"alternateAllele\"),\n                    f.col(\"OR or BETA\"),\n                    
f.col(\"95% CI (TEXT)\"),\n                    f.col(\"P-VALUE\"),\n                    \"lower\",\n                ).alias(\"oddsRatioConfidenceIntervalLower\"),\n                # CI upper of the odds ratio value\n                StudyLocusGWASCatalog._harmonise_odds_ratio_ci(\n                    StudyLocusGWASCatalog._normalise_risk_allele(\n                        f.col(\"STRONGEST SNP-RISK ALLELE\")\n                    ),\n                    f.col(\"referenceAllele\"),\n                    f.col(\"alternateAllele\"),\n                    f.col(\"OR or BETA\"),\n                    f.col(\"95% CI (TEXT)\"),\n                    f.col(\"P-VALUE\"),\n                    \"upper\",\n                ).alias(\"oddsRatioConfidenceIntervalUpper\"),\n                # p-value of the association, string: split into exponent and mantissa.\n                *StudyLocusGWASCatalog._parse_pvalue(f.col(\"P-VALUE\")),\n                # Capturing phenotype granularity at the association level\n                StudyLocusGWASCatalog._concatenate_substudy_description(\n                    f.col(\"DISEASE/TRAIT\"),\n                    f.col(\"P-VALUE (TEXT)\"),\n                    f.col(\"MAPPED_TRAIT_URI\"),\n                ).alias(\"subStudyDescription\"),\n                # Quality controls (array of strings)\n                \"qualityControls\",\n            )\n        )\n\n    def update_study_id(\n        self: StudyLocusGWASCatalog, study_annotation: DataFrame\n    ) -> StudyLocusGWASCatalog:\n\"\"\"Update studyId with a dataframe containing study.\n\n        Args:\n            study_annotation (DataFrame): Dataframe containing `updatedStudyId` and key columns `studyId` and `subStudyDescription`.\n\n        Returns:\n            StudyLocusGWASCatalog: Updated study locus.\n        \"\"\"\n        self.df = (\n            self._df.join(\n                study_annotation, on=[\"studyId\", \"subStudyDescription\"], how=\"left\"\n            )\n            .withColumn(\"studyId\", f.coalesce(\"updatedStudyId\", \"studyId\"))\n            .drop(\"subStudyDescription\", \"updatedStudyId\")\n        )\n        return self\n\n    def annotate_ld(\n        self: StudyLocusGWASCatalog,\n        session: Session,\n        studies: StudyIndexGWASCatalog,\n        ld_populations: list[str],\n        ld_index_template: str,\n        ld_matrix_template: str,\n        min_r2: float,\n    ) -> StudyLocus:\n\"\"\"Annotate LD set for every studyLocus using gnomAD.\n\n        Args:\n            session (Session): Session\n            studies (StudyIndexGWASCatalog): Study index containing ancestry information\n            ld_populations (list[str]): List of populations to annotate\n            ld_index_template (str): Template path of the LD matrix index containing `{POP}` where the population is expected\n            ld_matrix_template (str): Template path of the LD matrix containing `{POP}` where the population is expected\n            min_r2 (float): Minimum r2 to include in the LD set\n\n        Returns:\n            StudyLocus: Study-locus with an annotated credible set.\n        \"\"\"\n        # TODO: call unique_study_locus_ancestries here so that it is not duplicated with ld_annotation_by_locus_ancestry\n        # LD annotation for all unique lead variants in all populations (study independent).\n        ld_r = LDAnnotatorGnomad.ld_annotation_by_locus_ancestry(\n            session,\n            self,\n            studies,\n            ld_populations,\n            ld_index_template,\n            
ld_matrix_template,\n            min_r2,\n        ).coalesce(400)\n\n        ld_set = (\n            self.unique_study_locus_ancestries(studies)\n            .join(ld_r, on=[\"chromosome\", \"variantId\", \"gnomadPopulation\"], how=\"left\")\n            .withColumn(\"r2\", f.pow(f.col(\"r\"), f.lit(2)))\n            .withColumn(\n                \"r2Overall\",\n                LDAnnotatorGnomad.weighted_r_overall(\n                    f.col(\"chromosome\"),\n                    f.col(\"studyId\"),\n                    f.col(\"variantId\"),\n                    f.col(\"tagVariantId\"),\n                    f.col(\"relativeSampleSize\"),\n                    f.col(\"r2\"),\n                ),\n            )\n            .groupBy(\"chromosome\", \"studyId\", \"variantId\")\n            .agg(\n                f.collect_set(\n                    f.when(\n                        f.col(\"tagVariantId\").isNotNull(),\n                        f.struct(\"tagVariantId\", \"r2Overall\"),\n                    )\n                ).alias(\"credibleSet\")\n            )\n        )\n\n        self.df = self.df.join(\n            ld_set, on=[\"chromosome\", \"studyId\", \"variantId\"], how=\"left\"\n        )\n\n        return self._qc_unresolved_ld()\n\n    def _qc_ambiguous_study(self: StudyLocusGWASCatalog) -> StudyLocusGWASCatalog:\n\"\"\"Flag associations with variants that can not be unambiguously associated with one study.\n\n        Returns:\n            StudyLocusGWASCatalog: Updated study locus.\n        \"\"\"\n        assoc_ambiguity_window = Window.partitionBy(\n            f.col(\"studyId\"), f.col(\"variantId\")\n        )\n\n        self._df.withColumn(\n            \"qualityControls\",\n            StudyLocus._update_quality_flag(\n                f.col(\"qualityControls\"),\n                f.count(f.col(\"variantId\")).over(assoc_ambiguity_window) > 1,\n                StudyLocusQualityCheck.AMBIGUOUS_STUDY,\n            ),\n        )\n        return self\n\n    def _qc_unresolved_ld(self: StudyLocusGWASCatalog) -> StudyLocusGWASCatalog:\n\"\"\"Flag associations with variants that are not found in the LD reference.\n\n        Returns:\n            StudyLocusGWASCatalog: Updated study locus.\n        \"\"\"\n        self._df.withColumn(\n            \"qualityControls\",\n            StudyLocus._update_quality_flag(\n                f.col(\"qualityControls\"),\n                f.col(\"credibleSet\").isNull(),\n                StudyLocusQualityCheck.UNRESOLVED_LD,\n            ),\n        )\n        return self\n
"},{"location":"components/dataset/study_locus/study_locus_gwas_catalog/#otg.dataset.study_locus.StudyLocusGWASCatalog.annotate_ld","title":"annotate_ld(session, studies, ld_populations, ld_index_template, ld_matrix_template, min_r2)","text":"

Annotate LD set for every studyLocus using gnomAD.

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| `session` | `Session` | Session | required |
| `studies` | `StudyIndexGWASCatalog` | Study index containing ancestry information | required |
| `ld_populations` | `list[str]` | List of populations to annotate | required |
| `ld_index_template` | `str` | Template path of the LD matrix index containing `{POP}` where the population is expected | required |
| `ld_matrix_template` | `str` | Template path of the LD matrix containing `{POP}` where the population is expected | required |
| `min_r2` | `float` | Minimum r2 to include in the LD set | required |

Returns:

| Name | Type | Description |
| --- | --- | --- |
| `StudyLocus` | `StudyLocus` | Study-locus with an annotated credible set. |

Source code in src/otg/dataset/study_locus.py
def annotate_ld(\n    self: StudyLocusGWASCatalog,\n    session: Session,\n    studies: StudyIndexGWASCatalog,\n    ld_populations: list[str],\n    ld_index_template: str,\n    ld_matrix_template: str,\n    min_r2: float,\n) -> StudyLocus:\n\"\"\"Annotate LD set for every studyLocus using gnomAD.\n\n    Args:\n        session (Session): Session\n        studies (StudyIndexGWASCatalog): Study index containing ancestry information\n        ld_populations (list[str]): List of populations to annotate\n        ld_index_template (str): Template path of the LD matrix index containing `{POP}` where the population is expected\n        ld_matrix_template (str): Template path of the LD matrix containing `{POP}` where the population is expected\n        min_r2 (float): Minimum r2 to include in the LD set\n\n    Returns:\n        StudyLocus: Study-locus with an annotated credible set.\n    \"\"\"\n    # TODO: call unique_study_locus_ancestries here so that it is not duplicated with ld_annotation_by_locus_ancestry\n    # LD annotation for all unique lead variants in all populations (study independent).\n    ld_r = LDAnnotatorGnomad.ld_annotation_by_locus_ancestry(\n        session,\n        self,\n        studies,\n        ld_populations,\n        ld_index_template,\n        ld_matrix_template,\n        min_r2,\n    ).coalesce(400)\n\n    ld_set = (\n        self.unique_study_locus_ancestries(studies)\n        .join(ld_r, on=[\"chromosome\", \"variantId\", \"gnomadPopulation\"], how=\"left\")\n        .withColumn(\"r2\", f.pow(f.col(\"r\"), f.lit(2)))\n        .withColumn(\n            \"r2Overall\",\n            LDAnnotatorGnomad.weighted_r_overall(\n                f.col(\"chromosome\"),\n                f.col(\"studyId\"),\n                f.col(\"variantId\"),\n                f.col(\"tagVariantId\"),\n                f.col(\"relativeSampleSize\"),\n                f.col(\"r2\"),\n            ),\n        )\n        .groupBy(\"chromosome\", \"studyId\", \"variantId\")\n        .agg(\n            f.collect_set(\n                f.when(\n                    f.col(\"tagVariantId\").isNotNull(),\n                    f.struct(\"tagVariantId\", \"r2Overall\"),\n                )\n            ).alias(\"credibleSet\")\n        )\n    )\n\n    self.df = self.df.join(\n        ld_set, on=[\"chromosome\", \"studyId\", \"variantId\"], how=\"left\"\n    )\n\n    return self._qc_unresolved_ld()\n
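A minimal usage sketch (not taken from the pipeline itself): it assumes a `session`, a populated `StudyLocusGWASCatalog` (`study_locus`) and a `StudyIndexGWASCatalog` (`study_index`) are already available; the population codes and template paths below are placeholders.

```python
# Hypothetical call; "nfe"/"afr" and the template paths are placeholder values.
study_locus_with_ld = study_locus.annotate_ld(
    session=session,
    studies=study_index,
    ld_populations=["nfe", "afr"],
    ld_index_template="path/to/ld_index_{POP}.parquet",
    ld_matrix_template="path/to/ld_matrix_{POP}.bm",
    min_r2=0.5,
)
# The returned study locus carries a `credibleSet` column of
# (tagVariantId, r2Overall) structs for each lead variant.
```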
"},{"location":"components/dataset/study_locus/study_locus_gwas_catalog/#otg.dataset.study_locus.StudyLocusGWASCatalog.from_source","title":"from_source(gwas_associations, variant_annotation, pvalue_threshold=5e-08) classmethod","text":"

Read GWASCatalog associations.

It reads the GWAS Catalog association dataset, selects and renames columns, casts columns, and applies a set of pre-defined filters to the data.

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| `gwas_associations` | `DataFrame` | GWAS Catalog raw associations dataset | required |
| `variant_annotation` | `VariantAnnotation` | Variant annotation dataset | required |
| `pvalue_threshold` | `float` | P-value threshold for flagging associations | `5e-08` |

Returns:

| Name | Type | Description |
| --- | --- | --- |
| `StudyLocusGWASCatalog` | `StudyLocusGWASCatalog` | StudyLocusGWASCatalog dataset |

Source code in src/otg/dataset/study_locus.py
@classmethod\ndef from_source(\n    cls: type[StudyLocusGWASCatalog],\n    gwas_associations: DataFrame,\n    variant_annotation: VariantAnnotation,\n    pvalue_threshold: float = 5e-8,\n) -> StudyLocusGWASCatalog:\n\"\"\"Read GWASCatalog associations.\n\n    It reads the GWAS Catalog association dataset, selects and renames columns, casts columns, and\n    applies some pre-defined filters on the data:\n\n    Args:\n        gwas_associations (DataFrame): GWAS Catalog raw associations dataset\n        variant_annotation (VariantAnnotation): Variant annotation dataset\n        pvalue_threshold (float): P-value threshold for flagging associations\n\n    Returns:\n        StudyLocusGWASCatalog: StudyLocusGWASCatalog dataset\n    \"\"\"\n    return cls(\n        _df=gwas_associations.withColumn(\n            \"studyLocusId\", f.monotonically_increasing_id().cast(LongType())\n        )\n        .transform(\n            # Map/harmonise variants to variant annotation dataset:\n            # This function adds columns: variantId, referenceAllele, alternateAllele, chromosome, position\n            lambda df: StudyLocusGWASCatalog._map_to_variant_annotation_variants(\n                df, variant_annotation\n            )\n        )\n        .withColumn(\n            # Perform all quality control checks:\n            \"qualityControls\",\n            StudyLocusGWASCatalog._qc_all(\n                f.array().alias(\"qualityControls\"),\n                f.col(\"CHR_ID\"),\n                f.col(\"CHR_POS\").cast(IntegerType()),\n                f.col(\"referenceAllele\"),\n                f.col(\"alternateAllele\"),\n                f.col(\"STRONGEST SNP-RISK ALLELE\"),\n                *StudyLocusGWASCatalog._parse_pvalue(f.col(\"P-VALUE\")),\n                pvalue_threshold,\n            ),\n        )\n        .select(\n            # INSIDE STUDY-LOCUS SCHEMA:\n            \"studyLocusId\",\n            \"variantId\",\n            # Mapped genomic location of the variant (; separated list)\n            \"chromosome\",\n            \"position\",\n            f.col(\"STUDY ACCESSION\").alias(\"studyId\"),\n            # beta value of the association\n            StudyLocusGWASCatalog._harmonise_beta(\n                StudyLocusGWASCatalog._normalise_risk_allele(\n                    f.col(\"STRONGEST SNP-RISK ALLELE\")\n                ),\n                f.col(\"referenceAllele\"),\n                f.col(\"alternateAllele\"),\n                f.col(\"OR or BETA\"),\n                f.col(\"95% CI (TEXT)\"),\n            ).alias(\"beta\"),\n            # odds ratio of the association\n            StudyLocusGWASCatalog._harmonise_odds_ratio(\n                StudyLocusGWASCatalog._normalise_risk_allele(\n                    f.col(\"STRONGEST SNP-RISK ALLELE\")\n                ),\n                f.col(\"referenceAllele\"),\n                f.col(\"alternateAllele\"),\n                f.col(\"OR or BETA\"),\n                f.col(\"95% CI (TEXT)\"),\n            ).alias(\"oddsRatio\"),\n            # CI lower of the beta value\n            StudyLocusGWASCatalog._harmonise_beta_ci(\n                StudyLocusGWASCatalog._normalise_risk_allele(\n                    f.col(\"STRONGEST SNP-RISK ALLELE\")\n                ),\n                f.col(\"referenceAllele\"),\n                f.col(\"alternateAllele\"),\n                f.col(\"OR or BETA\"),\n                f.col(\"95% CI (TEXT)\"),\n                f.col(\"P-VALUE\"),\n                \"lower\",\n            
).alias(\"betaConfidenceIntervalLower\"),\n            # CI upper for the beta value\n            StudyLocusGWASCatalog._harmonise_beta_ci(\n                StudyLocusGWASCatalog._normalise_risk_allele(\n                    f.col(\"STRONGEST SNP-RISK ALLELE\")\n                ),\n                f.col(\"referenceAllele\"),\n                f.col(\"alternateAllele\"),\n                f.col(\"OR or BETA\"),\n                f.col(\"95% CI (TEXT)\"),\n                f.col(\"P-VALUE\"),\n                \"upper\",\n            ).alias(\"betaConfidenceIntervalUpper\"),\n            # CI lower of the odds ratio value\n            StudyLocusGWASCatalog._harmonise_odds_ratio_ci(\n                StudyLocusGWASCatalog._normalise_risk_allele(\n                    f.col(\"STRONGEST SNP-RISK ALLELE\")\n                ),\n                f.col(\"referenceAllele\"),\n                f.col(\"alternateAllele\"),\n                f.col(\"OR or BETA\"),\n                f.col(\"95% CI (TEXT)\"),\n                f.col(\"P-VALUE\"),\n                \"lower\",\n            ).alias(\"oddsRatioConfidenceIntervalLower\"),\n            # CI upper of the odds ratio value\n            StudyLocusGWASCatalog._harmonise_odds_ratio_ci(\n                StudyLocusGWASCatalog._normalise_risk_allele(\n                    f.col(\"STRONGEST SNP-RISK ALLELE\")\n                ),\n                f.col(\"referenceAllele\"),\n                f.col(\"alternateAllele\"),\n                f.col(\"OR or BETA\"),\n                f.col(\"95% CI (TEXT)\"),\n                f.col(\"P-VALUE\"),\n                \"upper\",\n            ).alias(\"oddsRatioConfidenceIntervalUpper\"),\n            # p-value of the association, string: split into exponent and mantissa.\n            *StudyLocusGWASCatalog._parse_pvalue(f.col(\"P-VALUE\")),\n            # Capturing phenotype granularity at the association level\n            StudyLocusGWASCatalog._concatenate_substudy_description(\n                f.col(\"DISEASE/TRAIT\"),\n                f.col(\"P-VALUE (TEXT)\"),\n                f.col(\"MAPPED_TRAIT_URI\"),\n            ).alias(\"subStudyDescription\"),\n            # Quality controls (array of strings)\n            \"qualityControls\",\n        )\n    )\n
"},{"location":"components/dataset/study_locus/study_locus_gwas_catalog/#otg.dataset.study_locus.StudyLocusGWASCatalog.update_study_id","title":"update_study_id(study_annotation)","text":"

Update studyId using a dataframe of study annotations.

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| `study_annotation` | `DataFrame` | Dataframe containing `updatedStudyId` and key columns `studyId` and `subStudyDescription`. | required |

Returns:

| Name | Type | Description |
| --- | --- | --- |
| `StudyLocusGWASCatalog` | `StudyLocusGWASCatalog` | Updated study locus. |

Source code in src/otg/dataset/study_locus.py
def update_study_id(\n    self: StudyLocusGWASCatalog, study_annotation: DataFrame\n) -> StudyLocusGWASCatalog:\n\"\"\"Update studyId with a dataframe containing study.\n\n    Args:\n        study_annotation (DataFrame): Dataframe containing `updatedStudyId` and key columns `studyId` and `subStudyDescription`.\n\n    Returns:\n        StudyLocusGWASCatalog: Updated study locus.\n    \"\"\"\n    self.df = (\n        self._df.join(\n            study_annotation, on=[\"studyId\", \"subStudyDescription\"], how=\"left\"\n        )\n        .withColumn(\"studyId\", f.coalesce(\"updatedStudyId\", \"studyId\"))\n        .drop(\"subStudyDescription\", \"updatedStudyId\")\n    )\n    return self\n
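A small sketch of the expected `study_annotation` input, assuming a running `spark` session as in the doctests; the identifiers are made up.

```python
# Hypothetical annotation dataframe: studyId + subStudyDescription identify the
# sub-study, and updatedStudyId replaces studyId wherever it is present.
study_annotation = spark.createDataFrame(
    [("GCST000001", "Trait A (all samples)", "GCST000001_1")],
    ["studyId", "subStudyDescription", "updatedStudyId"],
)
study_locus = study_locus.update_study_id(study_annotation)
```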
"},{"location":"components/method/_method/","title":"Method","text":"

Methods used across the Open Targets Genetics Pipeline

"},{"location":"components/method/clumping/","title":"Clumping","text":"

Clumping is a commonly used post-processing method that allows for identification of independent association signals from GWAS summary statistics and curated associations. This process is critical because of the complex linkage disequilibrium (LD) structure in human populations, which can result in multiple statistically significant associations within the same genomic region. Clumping methods help reduce redundancy in GWAS results and ensure that each reported association represents an independent signal.

We have implemented 2 clumping methods:

"},{"location":"components/method/clumping/#clumping-based-on-linkage-disequilibrium-ld","title":"Clumping based on Linkage Disequilibrium (LD)","text":"

LD clumping reports the most significant genetic associations in a region in terms of a smaller number of \u201cclumps\u201d of genetically linked SNPs.

Source code in src/otg/method/clump.py
class LDclumping:\n\"\"\"LD clumping reports the most significant genetic associations in a region in terms of a smaller number of \u201cclumps\u201d of genetically linked SNPs.\"\"\"\n\n    @staticmethod\n    def _is_lead_linked(\n        study_id: Column,\n        variant_id: Column,\n        p_value_exponent: Column,\n        p_value_mantissa: Column,\n        credible_set: Column,\n    ) -> Column:\n\"\"\"Evaluates whether a lead variant is linked to a tag (with lowest p-value) in the same studyLocus dataset.\n\n        Args:\n            study_id (Column): studyId\n            variant_id (Column): Lead variant id\n            p_value_exponent (Column): p-value exponent\n            p_value_mantissa (Column): p-value mantissa\n            credible_set (Column): Credible set <array of structs>\n\n        Returns:\n            Column: Boolean in which True indicates that the lead is linked to another tag in the same dataset.\n        \"\"\"\n        leads_in_study = f.collect_set(variant_id).over(Window.partitionBy(study_id))\n        tags_in_studylocus = f.array_union(\n            # Get all tag variants from the credible set per studyLocusId\n            f.transform(credible_set, lambda x: x.tagVariantId),\n            # And append the lead variant so that the intersection is the same for all studyLocusIds in a study\n            f.array(f.col(\"variantId\")),\n        )\n        intersect_lead_tags = f.array_sort(\n            f.array_intersect(leads_in_study, tags_in_studylocus)\n        )\n        return (\n            # If the lead is in the credible set, we rank the peaks by p-value\n            f.when(\n                f.size(intersect_lead_tags) > 0,\n                f.row_number().over(\n                    Window.partitionBy(study_id, intersect_lead_tags).orderBy(\n                        p_value_exponent, p_value_mantissa\n                    )\n                )\n                > 1,\n            )\n            # If the intersection is empty (lead is not in the credible set or cred set is empty), the association is not linked\n            .otherwise(f.lit(False))\n        )\n\n    @classmethod\n    def clump(cls: type[LDclumping], associations: StudyLocus) -> StudyLocus:\n\"\"\"Perform clumping on studyLocus dataset.\n\n        Args:\n            associations (StudyLocus): StudyLocus dataset\n\n        Returns:\n            StudyLocus: including flag and removing credibleSet information for LD clumped loci.\n        \"\"\"\n        return associations.clump()\n
"},{"location":"components/method/clumping/#otg.method.clump.LDclumping.clump","title":"clump(associations) classmethod","text":"

Perform clumping on studyLocus dataset.

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| `associations` | `StudyLocus` | StudyLocus dataset | required |

Returns:

| Name | Type | Description |
| --- | --- | --- |
| `StudyLocus` | `StudyLocus` | including flag and removing credibleSet information for LD clumped loci. |

Source code in src/otg/method/clump.py
@classmethod\ndef clump(cls: type[LDclumping], associations: StudyLocus) -> StudyLocus:\n\"\"\"Perform clumping on studyLocus dataset.\n\n    Args:\n        associations (StudyLocus): StudyLocus dataset\n\n    Returns:\n        StudyLocus: including flag and removing credibleSet information for LD clumped loci.\n    \"\"\"\n    return associations.clump()\n
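A minimal usage sketch, assuming `study_locus` is an existing `StudyLocus` dataset produced earlier in the pipeline.

```python
from otg.method.clump import LDclumping

# Flags lead variants that are linked to a stronger lead in the same study
# and drops their credible set information.
clumped_study_locus = LDclumping.clump(study_locus)
```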
"},{"location":"components/method/coloc/","title":"coloc","text":"

Calculate bayesian colocalisation based on overlapping signals from credible sets.

Based on the R COLOC package, which uses the Bayes factors from the credible set to estimate the posterior probability of colocalisation. This method makes the simplifying assumption that only one single causal variant exists for any given trait in any genomic region.

| Hypothesis | Description |
| --- | --- |
| H0 | no association with either trait in the region |
| H1 | association with trait 1 only |
| H2 | association with trait 2 only |
| H3 | both traits are associated, but have different single causal variants |
| H4 | both traits are associated and share the same single causal variant |

Approximate Bayes factors required

Coloc requires the availability of approximate Bayes factors (ABF) for each variant in the credible set (logABF column).
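The arithmetic behind the posteriors can be sketched with plain NumPy: each hypothesis gets a log Bayes factor (`lH0abf`..`lH4abf` in the source code below), and the posterior probabilities are their softmax, computed with the same max-subtraction trick used by `Coloc._get_logsum`. The numbers here are made up.

```python
import numpy as np

def get_logsum(log_abf):
    # log(sum(exp(x))) computed stably by subtracting the maximum first
    themax = np.max(log_abf)
    return float(themax + np.log(np.sum(np.exp(log_abf - themax))))

# Hypothetical log Bayes factors for H0..H4 at one pair of overlapping loci
log_abfs = np.array([0.0, 2.3, 1.1, 4.0, 6.5])

posteriors = np.exp(log_abfs - get_logsum(log_abfs))
print(posteriors.round(3), posteriors.sum())  # five probabilities summing to 1
```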

Source code in src/otg/method/colocalisation.py
class Coloc:\n\"\"\"Calculate bayesian colocalisation based on overlapping signals from credible sets.\n\n    Based on the [R COLOC package](https://github.com/chr1swallace/coloc/blob/main/R/claudia.R), which uses the Bayes factors from the credible set to estimate the posterior probability of colocalisation. This method makes the simplifying assumption that **only one single causal variant** exists for any given trait in any genomic region.\n\n    | Hypothesis    | Description                                                           |\n    | ------------- | --------------------------------------------------------------------- |\n    | H<sub>0</sub> | no association with either trait in the region                        |\n    | H<sub>1</sub> | association with trait 1 only                                         |\n    | H<sub>2</sub> | association with trait 2 only                                         |\n    | H<sub>3</sub> | both traits are associated, but have different single causal variants |\n    | H<sub>4</sub> | both traits are associated and share the same single causal variant   |\n\n    !!! warning \"Approximate Bayes factors required\"\n        Coloc requires the availability of approximate Bayes factors (ABF) for each variant in the credible set (`logABF` column).\n\n    \"\"\"\n\n    @staticmethod\n    def _get_logsum(log_abf: ndarray) -> float:\n\"\"\"Calculates logsum of vector.\n\n        This function calculates the log of the sum of the exponentiated\n        logs taking out the max, i.e. insuring that the sum is not Inf\n\n        Args:\n            log_abf (ndarray): log approximate bayes factor\n\n        Returns:\n            float: logsum\n\n        Example:\n            >>> l = [0.2, 0.1, 0.05, 0]\n            >>> round(Coloc._get_logsum(l), 6)\n            1.476557\n        \"\"\"\n        themax = np.max(log_abf)\n        result = themax + np.log(np.sum(np.exp(log_abf - themax)))\n        return float(result)\n\n    @staticmethod\n    def _get_posteriors(all_abfs: ndarray) -> DenseVector:\n\"\"\"Calculate posterior probabilities for each hypothesis.\n\n        Args:\n            all_abfs (ndarray): h0-h4 bayes factors\n\n        Returns:\n            DenseVector: Posterior\n\n        Example:\n            >>> l = np.array([0.2, 0.1, 0.05, 0])\n            >>> Coloc._get_posteriors(l)\n            DenseVector([0.279, 0.2524, 0.2401, 0.2284])\n        \"\"\"\n        diff = all_abfs - Coloc._get_logsum(all_abfs)\n        abfs_posteriors = np.exp(diff)\n        return Vectors.dense(abfs_posteriors)\n\n    @classmethod\n    def colocalise(\n        cls: type[Coloc],\n        overlapping_signals: StudyLocusOverlap,\n        priorc1: float = 1e-4,\n        priorc2: float = 1e-4,\n        priorc12: float = 1e-5,\n    ) -> Colocalisation:\n\"\"\"Calculate bayesian colocalisation based on overlapping signals.\n\n        Args:\n            overlapping_signals (StudyLocusOverlap): overlapping peaks\n            priorc1 (float): Prior on variant being causal for trait 1. Defaults to 1e-4.\n            priorc2 (float): Prior on variant being causal for trait 2. Defaults to 1e-4.\n            priorc12 (float): Prior on variant being causal for traits 1 and 2. 
Defaults to 1e-5.\n\n        Returns:\n            Colocalisation: Colocalisation results\n        \"\"\"\n        # register udfs\n        logsum = f.udf(Coloc._get_logsum, DoubleType())\n        posteriors = f.udf(Coloc._get_posteriors, VectorUDT())\n        return Colocalisation(\n            _df=(\n                overlapping_signals.df\n                # Before summing log_abf columns nulls need to be filled with 0:\n                .fillna(0, subset=[\"left_logABF\", \"right_logABF\"])\n                # Sum of log_abfs for each pair of signals\n                .withColumn(\"sum_log_abf\", f.col(\"left_logABF\") + f.col(\"right_logABF\"))\n                # Group by overlapping peak and generating dense vectors of log_abf:\n                .groupBy(\"chromosome\", \"left_studyLocusId\", \"right_studyLocusId\")\n                .agg(\n                    f.count(\"*\").alias(\"coloc_n_vars\"),\n                    fml.array_to_vector(f.collect_list(f.col(\"left_logABF\"))).alias(\n                        \"left_logABF\"\n                    ),\n                    fml.array_to_vector(f.collect_list(f.col(\"right_logABF\"))).alias(\n                        \"right_logABF\"\n                    ),\n                    fml.array_to_vector(f.collect_list(f.col(\"sum_log_abf\"))).alias(\n                        \"sum_log_abf\"\n                    ),\n                )\n                .withColumn(\"logsum1\", logsum(f.col(\"left_logABF\")))\n                .withColumn(\"logsum2\", logsum(f.col(\"right_logABF\")))\n                .withColumn(\"logsum12\", logsum(f.col(\"sum_log_abf\")))\n                .drop(\"left_logABF\", \"right_logABF\", \"sum_log_abf\")\n                # Add priors\n                # priorc1 Prior on variant being causal for trait 1\n                .withColumn(\"priorc1\", f.lit(priorc1))\n                # priorc2 Prior on variant being causal for trait 2\n                .withColumn(\"priorc2\", f.lit(priorc2))\n                # priorc12 Prior on variant being causal for traits 1 and 2\n                .withColumn(\"priorc12\", f.lit(priorc12))\n                # h0-h2\n                .withColumn(\"lH0abf\", f.lit(0))\n                .withColumn(\"lH1abf\", f.log(f.col(\"priorc1\")) + f.col(\"logsum1\"))\n                .withColumn(\"lH2abf\", f.log(f.col(\"priorc2\")) + f.col(\"logsum2\"))\n                # h3\n                .withColumn(\"sumlogsum\", f.col(\"logsum1\") + f.col(\"logsum2\"))\n                # exclude null H3/H4s: due to sumlogsum == logsum12\n                .filter(f.col(\"sumlogsum\") != f.col(\"logsum12\"))\n                .withColumn(\"max\", f.greatest(\"sumlogsum\", \"logsum12\"))\n                .withColumn(\n                    \"logdiff\",\n                    (\n                        f.col(\"max\")\n                        + f.log(\n                            f.exp(f.col(\"sumlogsum\") - f.col(\"max\"))\n                            - f.exp(f.col(\"logsum12\") - f.col(\"max\"))\n                        )\n                    ),\n                )\n                .withColumn(\n                    \"lH3abf\",\n                    f.log(f.col(\"priorc1\"))\n                    + f.log(f.col(\"priorc2\"))\n                    + f.col(\"logdiff\"),\n                )\n                .drop(\"right_logsum\", \"left_logsum\", \"sumlogsum\", \"max\", \"logdiff\")\n                # h4\n                .withColumn(\"lH4abf\", f.log(f.col(\"priorc12\")) + f.col(\"logsum12\"))\n                # cleaning\n                .drop(\n  
                  \"priorc1\", \"priorc2\", \"priorc12\", \"logsum1\", \"logsum2\", \"logsum12\"\n                )\n                # posteriors\n                .withColumn(\n                    \"allABF\",\n                    fml.array_to_vector(\n                        f.array(\n                            f.col(\"lH0abf\"),\n                            f.col(\"lH1abf\"),\n                            f.col(\"lH2abf\"),\n                            f.col(\"lH3abf\"),\n                            f.col(\"lH4abf\"),\n                        )\n                    ),\n                )\n                .withColumn(\n                    \"posteriors\", fml.vector_to_array(posteriors(f.col(\"allABF\")))\n                )\n                .withColumn(\"coloc_h0\", f.col(\"posteriors\").getItem(0))\n                .withColumn(\"coloc_h1\", f.col(\"posteriors\").getItem(1))\n                .withColumn(\"coloc_h2\", f.col(\"posteriors\").getItem(2))\n                .withColumn(\"coloc_h3\", f.col(\"posteriors\").getItem(3))\n                .withColumn(\"coloc_h4\", f.col(\"posteriors\").getItem(4))\n                .withColumn(\"coloc_h4_h3\", f.col(\"coloc_h4\") / f.col(\"coloc_h3\"))\n                .withColumn(\"coloc_log2_h4_h3\", f.log2(f.col(\"coloc_h4_h3\")))\n                # clean up\n                .drop(\n                    \"posteriors\",\n                    \"allABF\",\n                    \"coloc_h4_h3\",\n                    \"lH0abf\",\n                    \"lH1abf\",\n                    \"lH2abf\",\n                    \"lH3abf\",\n                    \"lH4abf\",\n                )\n                .withColumn(\"colocalisationMethod\", f.lit(\"COLOC\"))\n            )\n        )\n
"},{"location":"components/method/coloc/#otg.method.colocalisation.Coloc.colocalise","title":"colocalise(overlapping_signals, priorc1=0.0001, priorc2=0.0001, priorc12=1e-05) classmethod","text":"

Calculate bayesian colocalisation based on overlapping signals.

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| `overlapping_signals` | `StudyLocusOverlap` | overlapping peaks | required |
| `priorc1` | `float` | Prior on variant being causal for trait 1. Defaults to 1e-4. | `0.0001` |
| `priorc2` | `float` | Prior on variant being causal for trait 2. Defaults to 1e-4. | `0.0001` |
| `priorc12` | `float` | Prior on variant being causal for traits 1 and 2. Defaults to 1e-5. | `1e-05` |

Returns:

| Name | Type | Description |
| --- | --- | --- |
| `Colocalisation` | `Colocalisation` | Colocalisation results |

Source code in src/otg/method/colocalisation.py
@classmethod\ndef colocalise(\n    cls: type[Coloc],\n    overlapping_signals: StudyLocusOverlap,\n    priorc1: float = 1e-4,\n    priorc2: float = 1e-4,\n    priorc12: float = 1e-5,\n) -> Colocalisation:\n\"\"\"Calculate bayesian colocalisation based on overlapping signals.\n\n    Args:\n        overlapping_signals (StudyLocusOverlap): overlapping peaks\n        priorc1 (float): Prior on variant being causal for trait 1. Defaults to 1e-4.\n        priorc2 (float): Prior on variant being causal for trait 2. Defaults to 1e-4.\n        priorc12 (float): Prior on variant being causal for traits 1 and 2. Defaults to 1e-5.\n\n    Returns:\n        Colocalisation: Colocalisation results\n    \"\"\"\n    # register udfs\n    logsum = f.udf(Coloc._get_logsum, DoubleType())\n    posteriors = f.udf(Coloc._get_posteriors, VectorUDT())\n    return Colocalisation(\n        _df=(\n            overlapping_signals.df\n            # Before summing log_abf columns nulls need to be filled with 0:\n            .fillna(0, subset=[\"left_logABF\", \"right_logABF\"])\n            # Sum of log_abfs for each pair of signals\n            .withColumn(\"sum_log_abf\", f.col(\"left_logABF\") + f.col(\"right_logABF\"))\n            # Group by overlapping peak and generating dense vectors of log_abf:\n            .groupBy(\"chromosome\", \"left_studyLocusId\", \"right_studyLocusId\")\n            .agg(\n                f.count(\"*\").alias(\"coloc_n_vars\"),\n                fml.array_to_vector(f.collect_list(f.col(\"left_logABF\"))).alias(\n                    \"left_logABF\"\n                ),\n                fml.array_to_vector(f.collect_list(f.col(\"right_logABF\"))).alias(\n                    \"right_logABF\"\n                ),\n                fml.array_to_vector(f.collect_list(f.col(\"sum_log_abf\"))).alias(\n                    \"sum_log_abf\"\n                ),\n            )\n            .withColumn(\"logsum1\", logsum(f.col(\"left_logABF\")))\n            .withColumn(\"logsum2\", logsum(f.col(\"right_logABF\")))\n            .withColumn(\"logsum12\", logsum(f.col(\"sum_log_abf\")))\n            .drop(\"left_logABF\", \"right_logABF\", \"sum_log_abf\")\n            # Add priors\n            # priorc1 Prior on variant being causal for trait 1\n            .withColumn(\"priorc1\", f.lit(priorc1))\n            # priorc2 Prior on variant being causal for trait 2\n            .withColumn(\"priorc2\", f.lit(priorc2))\n            # priorc12 Prior on variant being causal for traits 1 and 2\n            .withColumn(\"priorc12\", f.lit(priorc12))\n            # h0-h2\n            .withColumn(\"lH0abf\", f.lit(0))\n            .withColumn(\"lH1abf\", f.log(f.col(\"priorc1\")) + f.col(\"logsum1\"))\n            .withColumn(\"lH2abf\", f.log(f.col(\"priorc2\")) + f.col(\"logsum2\"))\n            # h3\n            .withColumn(\"sumlogsum\", f.col(\"logsum1\") + f.col(\"logsum2\"))\n            # exclude null H3/H4s: due to sumlogsum == logsum12\n            .filter(f.col(\"sumlogsum\") != f.col(\"logsum12\"))\n            .withColumn(\"max\", f.greatest(\"sumlogsum\", \"logsum12\"))\n            .withColumn(\n                \"logdiff\",\n                (\n                    f.col(\"max\")\n                    + f.log(\n                        f.exp(f.col(\"sumlogsum\") - f.col(\"max\"))\n                        - f.exp(f.col(\"logsum12\") - f.col(\"max\"))\n                    )\n                ),\n            )\n            .withColumn(\n                \"lH3abf\",\n                f.log(f.col(\"priorc1\"))\n     
           + f.log(f.col(\"priorc2\"))\n                + f.col(\"logdiff\"),\n            )\n            .drop(\"right_logsum\", \"left_logsum\", \"sumlogsum\", \"max\", \"logdiff\")\n            # h4\n            .withColumn(\"lH4abf\", f.log(f.col(\"priorc12\")) + f.col(\"logsum12\"))\n            # cleaning\n            .drop(\n                \"priorc1\", \"priorc2\", \"priorc12\", \"logsum1\", \"logsum2\", \"logsum12\"\n            )\n            # posteriors\n            .withColumn(\n                \"allABF\",\n                fml.array_to_vector(\n                    f.array(\n                        f.col(\"lH0abf\"),\n                        f.col(\"lH1abf\"),\n                        f.col(\"lH2abf\"),\n                        f.col(\"lH3abf\"),\n                        f.col(\"lH4abf\"),\n                    )\n                ),\n            )\n            .withColumn(\n                \"posteriors\", fml.vector_to_array(posteriors(f.col(\"allABF\")))\n            )\n            .withColumn(\"coloc_h0\", f.col(\"posteriors\").getItem(0))\n            .withColumn(\"coloc_h1\", f.col(\"posteriors\").getItem(1))\n            .withColumn(\"coloc_h2\", f.col(\"posteriors\").getItem(2))\n            .withColumn(\"coloc_h3\", f.col(\"posteriors\").getItem(3))\n            .withColumn(\"coloc_h4\", f.col(\"posteriors\").getItem(4))\n            .withColumn(\"coloc_h4_h3\", f.col(\"coloc_h4\") / f.col(\"coloc_h3\"))\n            .withColumn(\"coloc_log2_h4_h3\", f.log2(f.col(\"coloc_h4_h3\")))\n            # clean up\n            .drop(\n                \"posteriors\",\n                \"allABF\",\n                \"coloc_h4_h3\",\n                \"lH0abf\",\n                \"lH1abf\",\n                \"lH2abf\",\n                \"lH3abf\",\n                \"lH4abf\",\n            )\n            .withColumn(\"colocalisationMethod\", f.lit(\"COLOC\"))\n        )\n    )\n
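In practice the method is called on an existing overlap dataset; a sketch, assuming `overlapping_signals` is a `StudyLocusOverlap` that already carries the `left_logABF`/`right_logABF` columns mentioned in the warning above.

```python
from otg.method.colocalisation import Coloc

# Default priors: 1e-4 for each single-trait hypothesis, 1e-5 for the shared
# causal variant; they can be overridden via priorc1/priorc2/priorc12.
coloc_results = Coloc.colocalise(overlapping_signals)
```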
"},{"location":"components/method/ecaviar/","title":"eCAVIAR","text":"

ECaviar-based colocalisation analysis.

It extends the CAVIAR framework to explicitly estimate the posterior probability that the same variant is causal in 2 studies while accounting for the uncertainty of LD. eCAVIAR computes the colocalization posterior probability (CLPP) by utilizing the marginal posterior probabilities. This framework allows for multiple variants to be causal in a single locus.
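A toy worked example of the CLPP aggregation (illustrative numbers only): per overlapping variant, CLPP is the product of the two posterior probabilities, and the locus-level value reported by the method is their sum.

```python
# Hypothetical per-variant posterior probabilities from the two studies
left_pp = [0.6, 0.3, 0.1]
right_pp = [0.5, 0.4, 0.1]

clpp_per_variant = [l * r for l, r in zip(left_pp, right_pp)]  # [0.30, 0.12, 0.01]
clpp_locus = sum(clpp_per_variant)                             # 0.43
```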

Source code in src/otg/method/colocalisation.py
class ECaviar:\n\"\"\"ECaviar-based colocalisation analysis.\n\n    It extends [CAVIAR](https://www.ncbi.nlm.nih.gov/pmc/articles/PMC5142122/#bib18)\u00a0framework to explicitly estimate the posterior probability that the same variant is causal in 2 studies while accounting for the uncertainty of LD. eCAVIAR computes the colocalization posterior probability (**CLPP**) by utilizing the marginal posterior probabilities. This framework allows for **multiple variants to be causal** in a single locus.\n    \"\"\"\n\n    @staticmethod\n    def _get_clpp(left_pp: Column, right_pp: Column) -> Column:\n\"\"\"Calculate the colocalisation posterior probability (CLPP).\n\n        If the fact that the same variant is found causal for two studies are independent events,\n        CLPP is defined as the product of posterior porbabilities that a variant is causal in both studies.\n\n        Args:\n            left_pp (Column): left posterior probability\n            right_pp (Column): right posterior probability\n\n        Returns:\n            Column: CLPP\n\n        Examples:\n            >>> d = [{\"left_pp\": 0.5, \"right_pp\": 0.5}, {\"left_pp\": 0.25, \"right_pp\": 0.75}]\n            >>> df = spark.createDataFrame(d)\n            >>> df.withColumn(\"clpp\", ECaviar._get_clpp(f.col(\"left_pp\"), f.col(\"right_pp\"))).show()\n            +-------+--------+------+\n            |left_pp|right_pp|  clpp|\n            +-------+--------+------+\n            |    0.5|     0.5|  0.25|\n            |   0.25|    0.75|0.1875|\n            +-------+--------+------+\n            <BLANKLINE>\n\n        \"\"\"\n        return left_pp * right_pp\n\n    @classmethod\n    def colocalise(\n        cls: type[ECaviar], overlapping_signals: StudyLocusOverlap\n    ) -> Colocalisation:\n\"\"\"Calculate bayesian colocalisation based on overlapping signals.\n\n        Args:\n            overlapping_signals (StudyLocusOverlap): overlapping signals.\n\n        Returns:\n            Colocalisation: colocalisation results based on eCAVIAR.\n        \"\"\"\n        return Colocalisation(\n            _df=(\n                overlapping_signals.df.withColumn(\n                    \"clpp\",\n                    ECaviar._get_clpp(\n                        f.col(\"left_posteriorProbability\"),\n                        f.col(\"right_posteriorProbability\"),\n                    ),\n                )\n                .groupBy(\"left_studyLocusId\", \"right_studyLocusId\", \"chromosome\")\n                .agg(\n                    f.count(\"*\").alias(\"coloc_n_vars\"),\n                    f.sum(f.col(\"clpp\")).alias(\"clpp\"),\n                )\n                .withColumn(\"colocalisationMethod\", f.lit(\"eCAVIAR\"))\n            )\n        )\n
"},{"location":"components/method/ecaviar/#otg.method.colocalisation.ECaviar.colocalise","title":"colocalise(overlapping_signals) classmethod","text":"

Calculate bayesian colocalisation based on overlapping signals.

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| `overlapping_signals` | `StudyLocusOverlap` | overlapping signals. | required |

Returns:

| Name | Type | Description |
| --- | --- | --- |
| `Colocalisation` | `Colocalisation` | colocalisation results based on eCAVIAR. |

Source code in src/otg/method/colocalisation.py
@classmethod\ndef colocalise(\n    cls: type[ECaviar], overlapping_signals: StudyLocusOverlap\n) -> Colocalisation:\n\"\"\"Calculate bayesian colocalisation based on overlapping signals.\n\n    Args:\n        overlapping_signals (StudyLocusOverlap): overlapping signals.\n\n    Returns:\n        Colocalisation: colocalisation results based on eCAVIAR.\n    \"\"\"\n    return Colocalisation(\n        _df=(\n            overlapping_signals.df.withColumn(\n                \"clpp\",\n                ECaviar._get_clpp(\n                    f.col(\"left_posteriorProbability\"),\n                    f.col(\"right_posteriorProbability\"),\n                ),\n            )\n            .groupBy(\"left_studyLocusId\", \"right_studyLocusId\", \"chromosome\")\n            .agg(\n                f.count(\"*\").alias(\"coloc_n_vars\"),\n                f.sum(f.col(\"clpp\")).alias(\"clpp\"),\n            )\n            .withColumn(\"colocalisationMethod\", f.lit(\"eCAVIAR\"))\n        )\n    )\n
"},{"location":"components/method/ld_annotator/","title":"LD annotator","text":"

Class to perform linkage disequilibrium (LD) annotation operations using data from gnomAD.

Source code in src/otg/method/ld.py
class LDAnnotatorGnomad:\n\"\"\"Class to annotate linkage disequilibrium (LD) operations from GnomAD.\"\"\"\n\n    @staticmethod\n    def _query_block_matrix(\n        bm: BlockMatrix,\n        idxs: list[int],\n        starts: list[int],\n        stops: list[int],\n        min_r2: float,\n    ) -> DataFrame:\n\"\"\"Query block matrix for idxs rows sparsified by start/stop columns.\n\n        Args:\n            bm (BlockMatrix): LD matrix containing r values\n            idxs (List[int]): Row indexes to query (distinct and incremental)\n            starts (List[int]): Interval start column indexes (same size as idxs)\n            stops (List[int]): Interval stop column indexes (same size as idxs)\n            min_r2 (float): Minimum r2 to keep\n\n        Returns:\n            DataFrame: i,j,r where i and j are the row and column indexes and r is the LD\n\n        Examples:\n            >>> import numpy as np\n            >>> r = np.array([[1, 0.8, 0.7, 0.2],\n            ...               [0.8, 1, 0.6, 0.1],\n            ...               [0.7, 0.6, 1, 0.3],\n            ...               [0.2, 0.1, 0.3, 1]])\n            >>> bm_r = BlockMatrix.from_numpy(r) # doctest: +SKIP\n            >>> LDAnnotatorGnomad._query_block_matrix(bm_r, [1, 2], [0, 1], [3, 4], 0.5).show() # doctest: +SKIP\n            +---+---+---+\n            |  i|  j|  r|\n            +---+---+---+\n            |  0|  0|0.8|\n            |  0|  1|1.0|\n            |  1|  2|1.0|\n            +---+---+---+\n            <BLANKLINE>\n        \"\"\"\n        bm_sparsified = bm.filter_rows(idxs).sparsify_row_intervals(\n            starts, stops, blocks_only=True\n        )\n        entries = bm_sparsified.entries(keyed=False)\n\n        return (\n            entries.rename({\"entry\": \"r\"})\n            .to_spark()\n            .filter(f.col(\"r\") ** 2 >= min_r2)\n            .withColumn(\"r\", f.when(f.col(\"r\") >= 1, f.lit(1)).otherwise(f.col(\"r\")))\n        )\n\n    @staticmethod\n    def _variant_coordinates_in_ldindex(\n        variants_df: DataFrame,\n        ld_index: LDIndex,\n    ) -> DataFrame:\n\"\"\"Idxs for variants, first variant in the region and last variant in the region in precomputed ld index.\n\n        It checks if the window defined by the start/stop indices is maintained after lifting over the variants.\n\n        Args:\n            variants_df (DataFrame): Lead variants from `_annotate_index_intervals` output\n            ld_index (LDIndex): LD index precomputed\n\n        Returns:\n            DataFrame: LD coordinates [variantId, chromosome, gnomadPopulation, i, idxs, start_idx and stop_idx]\n        \"\"\"\n        w = Window.orderBy(\"chromosome\", \"idx\")\n        return (\n            variants_df.join(\n                ld_index.df,\n                on=[\"variantId\", \"chromosome\"],\n            )\n            .select(\n                \"variantId\",\n                \"chromosome\",\n                \"gnomadPopulation\",\n                \"idx\",\n                \"start_idx\",\n                \"stop_idx\",\n            )\n            .distinct()\n            # necessary to resolve return of .entries() function\n            .withColumn(\"i\", f.row_number().over(w))\n            # the dataframe has to be ordered to query the block matrix\n            .orderBy(\"idx\")\n        )\n\n    @staticmethod\n    def weighted_r_overall(\n        chromosome: Column,\n        study_id: Column,\n        variant_id: Column,\n        tag_variant_id: Column,\n        relative_sample_size: Column,\n      
  r: Column,\n    ) -> Column:\n\"\"\"Aggregation of weighted R information using ancestry proportions.\n\n        The method implements a simple average weighted by the relative population sizes.\n\n        Args:\n            chromosome (Column): Chromosome\n            study_id (Column): Study identifier\n            variant_id (Column): Variant identifier\n            tag_variant_id (Column): Tag variant identifier\n            relative_sample_size (Column): Relative sample size\n            r (Column): Correlation\n\n        Returns:\n            Column: Estimates weighted R information\n\n        Examples:\n            >>> data = [('t3', 0.25, 0.2), ('t3', 0.25, 0.2), ('t3', 0.5, 0.99)]\n            >>> columns = ['tag_variant_id', 'relative_sample_size', 'r']\n            >>> (\n            ...    spark.createDataFrame(data, columns)\n            ...     .withColumn('chr', f.lit('chr1'))\n            ...     .withColumn('study_id', f.lit('s1'))\n            ...     .withColumn('variant_id', f.lit('v1'))\n            ...     .withColumn(\n            ...         'r_overall',\n            ...         LDAnnotatorGnomad.weighted_r_overall(\n            ...             f.col('chr'),\n            ...             f.col('study_id'),\n            ...             f.col('variant_id'),\n            ...             f.col('tag_variant_id'),\n            ...             f.col('relative_sample_size'),\n            ...             f.col('r')\n            ...         )\n            ...     )\n            ...     .show()\n            ... )\n            +--------------+--------------------+----+----+--------+----------+---------+\n            |tag_variant_id|relative_sample_size|   r| chr|study_id|variant_id|r_overall|\n            +--------------+--------------------+----+----+--------+----------+---------+\n            |            t3|                0.25| 0.2|chr1|      s1|        v1|    0.595|\n            |            t3|                0.25| 0.2|chr1|      s1|        v1|    0.595|\n            |            t3|                 0.5|0.99|chr1|      s1|        v1|    0.595|\n            +--------------+--------------------+----+----+--------+----------+---------+\n            <BLANKLINE>\n        \"\"\"\n        pseudo_r = f.when(r >= 1, 0.9999995).otherwise(r)\n        return f.round(\n            f.sum(pseudo_r * relative_sample_size).over(\n                Window.partitionBy(chromosome, study_id, variant_id, tag_variant_id)\n            ),\n            6,\n        )\n\n    @staticmethod\n    def _flag_partial_mapped(\n        study_id: Column, variant_id: Column, tag_variant_id: Column\n    ) -> Column:\n\"\"\"Generate flag for lead/tag pairs.\n\n        Some lead variants can be resolved in one population but not in other. Those rows interfere with PICS calculation, so they needs to be dropped.\n\n        Args:\n            study_id (Column): Study identifier column\n            variant_id (Column): Identifier of the lead variant\n            tag_variant_id (Column): Identifier of the tag variant\n\n        Returns:\n            Column: Boolean\n\n        Examples:\n            >>> data = [\n            ...     ('study_1', 'lead_1', 'tag_1'),  # <- keep row as tag available.\n            ...     ('study_1', 'lead_1', 'tag_2'),  # <- keep row as tag available.\n            ...     ('study_1', 'lead_2', 'tag_3'),  # <- keep row as tag available\n            ...     ('study_1', 'lead_2', None),  # <- drop row as lead 2 is resolved.\n            ...     
('study_1', 'lead_3', None)   # <- keep row as lead 3 is not resolved.\n            ... ]\n            >>> (\n            ...     spark.createDataFrame(data, ['studyId', 'variantId', 'tagVariantId'])\n            ...     .withColumn(\"flag_to_keep_tag\", LDAnnotatorGnomad._flag_partial_mapped(f.col('studyId'), f.col('variantId'), f.col('tagVariantId')))\n            ...     .show()\n            ... )\n            +-------+---------+------------+----------------+\n            |studyId|variantId|tagVariantId|flag_to_keep_tag|\n            +-------+---------+------------+----------------+\n            |study_1|   lead_1|       tag_1|            true|\n            |study_1|   lead_1|       tag_2|            true|\n            |study_1|   lead_2|       tag_3|            true|\n            |study_1|   lead_2|        null|           false|\n            |study_1|   lead_3|        null|            true|\n            +-------+---------+------------+----------------+\n            <BLANKLINE>\n        \"\"\"\n        return tag_variant_id.isNotNull() | ~f.array_contains(\n            f.collect_set(tag_variant_id.isNotNull()).over(\n                Window.partitionBy(study_id, variant_id)\n            ),\n            True,\n        )\n\n    @staticmethod\n    def get_ld_annotated_assocs_for_population(\n        population: str,\n        ld_index: LDIndex,\n        ld_matrix: BlockMatrix,\n        locus_ancestry: DataFrame,\n        min_r2: float,\n    ) -> DataFrame:\n\"\"\"This function annotates association data with LD information.\"\"\"\n        # map variants to precomputed LD indexes from gnomAD\n        variants_in_pop = locus_ancestry.filter(f.col(\"gnomadPopulation\") == population)\n        variants_ld_coordinates = LDAnnotatorGnomad._variant_coordinates_in_ldindex(\n            variants_in_pop, ld_index\n        ).persist()\n\n        # idxs for lead, first variant in the region and last variant in the region\n        variants_ld_scores = LDAnnotatorGnomad._query_block_matrix(\n            ld_matrix + ld_matrix.T,\n            variants_ld_coordinates.rdd.map(lambda x: x.idx).collect(),\n            variants_ld_coordinates.rdd.map(lambda x: x.start_idx).collect(),\n            variants_ld_coordinates.rdd.map(lambda x: x.stop_idx).collect(),\n            min_r2,\n        )\n\n        # aggregate LD info\n        variants_ld_info = variants_ld_scores.join(\n            f.broadcast(variants_ld_coordinates),\n            on=\"i\",\n            how=\"inner\",\n        ).select(\"variantId\", \"chromosome\", \"gnomadPopulation\", \"j\", \"r\")\n\n        variants_ld_coordinates.unpersist()\n        return LDAnnotatorGnomad.variants_in_ld_in_gnomad_pop(\n            variants_ld_info=variants_ld_info,\n            ld_index=ld_index,\n        )\n\n    @classmethod\n    def variants_in_ld_in_gnomad_pop(\n        cls: type[LDAnnotatorGnomad],\n        variants_ld_info: DataFrame,\n        ld_index: LDIndex,\n    ) -> DataFrame:\n\"\"\"Return LD annotation for variants in specific gnomad population.\n\n        Args:\n            variants_ld_info (DataFrame): variant and their LD scores (r) and coordinates from the LD matrix of a population\n            ld_index (LDIndex): LD index precomputed\n\n        Returns:\n            DataFrame: LD information in the columns [\"variantId\", \"chromosome\", \"gnomadPopulation\", \"tagVariantId\", \"r\"]\n        \"\"\"\n        return (\n            variants_ld_info.alias(\"left\")\n            .join(\n                ld_index.df.select(\n                    
f.col(\"chromosome\"),\n                    f.col(\"variantId\").alias(\"tagVariantId\"),\n                    f.col(\"idx\").alias(\"tag_idx\"),\n                ).alias(\"tags\"),\n                on=[\n                    f.col(\"left.chromosome\") == f.col(\"tags.chromosome\"),\n                    f.col(\"left.j\") == f.col(\"tags.tag_idx\"),\n                ],\n            )\n            .select(\n                \"variantId\", \"left.chromosome\", \"gnomadPopulation\", \"tagVariantId\", \"r\"\n            )\n        )\n\n    @classmethod\n    def ld_annotation_by_locus_ancestry(\n        cls: type[LDAnnotatorGnomad],\n        session: Session,\n        associations: StudyLocusGWASCatalog,\n        studies: StudyIndexGWASCatalog,\n        ld_populations: list[str],\n        ld_index_template: str,\n        ld_matrix_template: str,\n        min_r2: float,\n    ) -> DataFrame:\n\"\"\"LD information for all locus and ancestries.\n\n        Args:\n            session (Session): Session\n            associations (StudyLocusGWASCatalog): GWAS associations\n            studies (StudyIndexGWASCatalog): study metadata of the associations\n            ld_populations (list[str]): List of populations to annotate\n            ld_index_template (str): Template path of the LD matrix index containing `{POP}` where the population is expected\n            ld_matrix_template (str): Template path of the LD matrix containing `{POP}` where the population is expected\n            min_r2 (float): minimum r2 to keep\n\n        Returns:\n            DataFrame: LD annotation [\"variantId\", \"chromosome\", \"gnomadPopulation\", \"tagVariantId\", \"r\"]\n        \"\"\"\n        # Unique lead - population pairs:\n        locus_ancestry = (\n            associations.unique_study_locus_ancestries(studies)\n            # Ignoring study information / relativeSampleSize to get unique lead-ancestry pairs\n            .drop(\"studyId\", \"relativeSampleSize\")\n            .distinct()\n            .persist()\n        )\n\n        # All gnomad populations captured in associations:\n        assoc_populations = locus_ancestry.rdd.map(\n            lambda x: x.gnomadPopulation\n        ).collect()\n\n        # Retrieve LD information from gnomAD\n        ld_annotated_assocs = []\n        for population in ld_populations:\n            if population in assoc_populations:\n                pop_parsed_ldindex_path = ld_index_template.format(POP=population)\n                pop_matrix_path = ld_matrix_template.format(POP=population)\n                ld_index = LDIndex.from_parquet(session, pop_parsed_ldindex_path)\n                ld_matrix = BlockMatrix.read(pop_matrix_path)\n                ld_annotated_assocs.append(\n                    LDAnnotatorGnomad.get_ld_annotated_assocs_for_population(\n                        population,\n                        ld_index,\n                        ld_matrix,\n                        locus_ancestry,\n                        min_r2,\n                    ).coalesce(400)\n                )\n        return reduce(DataFrame.unionByName, ld_annotated_assocs)\n
"},{"location":"components/method/ld_annotator/#otg.method.ld.LDAnnotatorGnomad.get_ld_annotated_assocs_for_population","title":"get_ld_annotated_assocs_for_population(population, ld_index, ld_matrix, locus_ancestry, min_r2) staticmethod","text":"

This function annotates association data with LD information.

Source code in src/otg/method/ld.py
@staticmethod\ndef get_ld_annotated_assocs_for_population(\n    population: str,\n    ld_index: LDIndex,\n    ld_matrix: BlockMatrix,\n    locus_ancestry: DataFrame,\n    min_r2: float,\n) -> DataFrame:\n\"\"\"This function annotates association data with LD information.\"\"\"\n    # map variants to precomputed LD indexes from gnomAD\n    variants_in_pop = locus_ancestry.filter(f.col(\"gnomadPopulation\") == population)\n    variants_ld_coordinates = LDAnnotatorGnomad._variant_coordinates_in_ldindex(\n        variants_in_pop, ld_index\n    ).persist()\n\n    # idxs for lead, first variant in the region and last variant in the region\n    variants_ld_scores = LDAnnotatorGnomad._query_block_matrix(\n        ld_matrix + ld_matrix.T,\n        variants_ld_coordinates.rdd.map(lambda x: x.idx).collect(),\n        variants_ld_coordinates.rdd.map(lambda x: x.start_idx).collect(),\n        variants_ld_coordinates.rdd.map(lambda x: x.stop_idx).collect(),\n        min_r2,\n    )\n\n    # aggregate LD info\n    variants_ld_info = variants_ld_scores.join(\n        f.broadcast(variants_ld_coordinates),\n        on=\"i\",\n        how=\"inner\",\n    ).select(\"variantId\", \"chromosome\", \"gnomadPopulation\", \"j\", \"r\")\n\n    variants_ld_coordinates.unpersist()\n    return LDAnnotatorGnomad.variants_in_ld_in_gnomad_pop(\n        variants_ld_info=variants_ld_info,\n        ld_index=ld_index,\n    )\n
"},{"location":"components/method/ld_annotator/#otg.method.ld.LDAnnotatorGnomad.ld_annotation_by_locus_ancestry","title":"ld_annotation_by_locus_ancestry(session, associations, studies, ld_populations, ld_index_template, ld_matrix_template, min_r2) classmethod","text":"

LD information for all loci and ancestries.

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| `session` | `Session` | Session | required |
| `associations` | `StudyLocusGWASCatalog` | GWAS associations | required |
| `studies` | `StudyIndexGWASCatalog` | study metadata of the associations | required |
| `ld_populations` | `list[str]` | List of populations to annotate | required |
| `ld_index_template` | `str` | Template path of the LD matrix index containing `{POP}` where the population is expected | required |
| `ld_matrix_template` | `str` | Template path of the LD matrix containing `{POP}` where the population is expected | required |
| `min_r2` | `float` | minimum r2 to keep | required |

Returns:

| Name | Type | Description |
| --- | --- | --- |
| `DataFrame` | `DataFrame` | LD annotation: ["variantId", "chromosome", "gnomadPopulation", "tagVariantId", "r"] |

Source code in src/otg/method/ld.py
@classmethod\ndef ld_annotation_by_locus_ancestry(\n    cls: type[LDAnnotatorGnomad],\n    session: Session,\n    associations: StudyLocusGWASCatalog,\n    studies: StudyIndexGWASCatalog,\n    ld_populations: list[str],\n    ld_index_template: str,\n    ld_matrix_template: str,\n    min_r2: float,\n) -> DataFrame:\n\"\"\"LD information for all locus and ancestries.\n\n    Args:\n        session (Session): Session\n        associations (StudyLocusGWASCatalog): GWAS associations\n        studies (StudyIndexGWASCatalog): study metadata of the associations\n        ld_populations (list[str]): List of populations to annotate\n        ld_index_template (str): Template path of the LD matrix index containing `{POP}` where the population is expected\n        ld_matrix_template (str): Template path of the LD matrix containing `{POP}` where the population is expected\n        min_r2 (float): minimum r2 to keep\n\n    Returns:\n        DataFrame: LD annotation [\"variantId\", \"chromosome\", \"gnomadPopulation\", \"tagVariantId\", \"r\"]\n    \"\"\"\n    # Unique lead - population pairs:\n    locus_ancestry = (\n        associations.unique_study_locus_ancestries(studies)\n        # Ignoring study information / relativeSampleSize to get unique lead-ancestry pairs\n        .drop(\"studyId\", \"relativeSampleSize\")\n        .distinct()\n        .persist()\n    )\n\n    # All gnomad populations captured in associations:\n    assoc_populations = locus_ancestry.rdd.map(\n        lambda x: x.gnomadPopulation\n    ).collect()\n\n    # Retrieve LD information from gnomAD\n    ld_annotated_assocs = []\n    for population in ld_populations:\n        if population in assoc_populations:\n            pop_parsed_ldindex_path = ld_index_template.format(POP=population)\n            pop_matrix_path = ld_matrix_template.format(POP=population)\n            ld_index = LDIndex.from_parquet(session, pop_parsed_ldindex_path)\n            ld_matrix = BlockMatrix.read(pop_matrix_path)\n            ld_annotated_assocs.append(\n                LDAnnotatorGnomad.get_ld_annotated_assocs_for_population(\n                    population,\n                    ld_index,\n                    ld_matrix,\n                    locus_ancestry,\n                    min_r2,\n                ).coalesce(400)\n            )\n    return reduce(DataFrame.unionByName, ld_annotated_assocs)\n
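A call sketch, assuming `session`, a GWAS Catalog study locus (`associations`) and its study index (`studies`) already exist; the population codes and template paths are placeholders.

```python
from otg.method.ld import LDAnnotatorGnomad

ld_r = LDAnnotatorGnomad.ld_annotation_by_locus_ancestry(
    session=session,
    associations=associations,
    studies=studies,
    ld_populations=["nfe", "afr"],  # example gnomAD population codes
    ld_index_template="path/to/ld_index_{POP}.parquet",
    ld_matrix_template="path/to/ld_matrix_{POP}.bm",
    min_r2=0.5,
)
# ld_r columns: variantId, chromosome, gnomadPopulation, tagVariantId, r
```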
"},{"location":"components/method/ld_annotator/#otg.method.ld.LDAnnotatorGnomad.variants_in_ld_in_gnomad_pop","title":"variants_in_ld_in_gnomad_pop(variants_ld_info, ld_index) classmethod","text":"

Return LD annotation for variants in a specific gnomAD population.

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| variants_ld_info | DataFrame | variants and their LD scores (r) and coordinates from the LD matrix of a population | required |
| ld_index | LDIndex | precomputed LD index | required |

Returns:

| Name | Type | Description |
| --- | --- | --- |
| DataFrame | DataFrame | LD information in the columns ["variantId", "chromosome", "gnomadPopulation", "tagVariantId", "r"] |

Source code in src/otg/method/ld.py
@classmethod\ndef variants_in_ld_in_gnomad_pop(\n    cls: type[LDAnnotatorGnomad],\n    variants_ld_info: DataFrame,\n    ld_index: LDIndex,\n) -> DataFrame:\n\"\"\"Return LD annotation for variants in specific gnomad population.\n\n    Args:\n        variants_ld_info (DataFrame): variant and their LD scores (r) and coordinates from the LD matrix of a population\n        ld_index (LDIndex): LD index precomputed\n\n    Returns:\n        DataFrame: LD information in the columns [\"variantId\", \"chromosome\", \"gnomadPopulation\", \"tagVariantId\", \"r\"]\n    \"\"\"\n    return (\n        variants_ld_info.alias(\"left\")\n        .join(\n            ld_index.df.select(\n                f.col(\"chromosome\"),\n                f.col(\"variantId\").alias(\"tagVariantId\"),\n                f.col(\"idx\").alias(\"tag_idx\"),\n            ).alias(\"tags\"),\n            on=[\n                f.col(\"left.chromosome\") == f.col(\"tags.chromosome\"),\n                f.col(\"left.j\") == f.col(\"tags.tag_idx\"),\n            ],\n        )\n        .select(\n            \"variantId\", \"left.chromosome\", \"gnomadPopulation\", \"tagVariantId\", \"r\"\n        )\n    )\n
"},{"location":"components/method/ld_annotator/#otg.method.ld.LDAnnotatorGnomad.weighted_r_overall","title":"weighted_r_overall(chromosome, study_id, variant_id, tag_variant_id, relative_sample_size, r) staticmethod","text":"

Aggregation of weighted R information using ancestry proportions.

The method implements a simple average weighted by the relative population sizes.
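To make the weighting concrete: with three tag rows carrying r of 0.2, 0.2 and 0.99 at relative sample sizes of 0.25, 0.25 and 0.5, the aggregated value is 0.2 × 0.25 + 0.2 × 0.25 + 0.99 × 0.5 = 0.595, which is the `r_overall` shown for every row of the example below.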

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| chromosome | Column | Chromosome | required |
| study_id | Column | Study identifier | required |
| variant_id | Column | Variant identifier | required |
| tag_variant_id | Column | Tag variant identifier | required |
| relative_sample_size | Column | Relative sample size | required |
| r | Column | Correlation | required |

Returns:

| Name | Type | Description |
| --- | --- | --- |
| Column | Column | Estimates weighted R information |

Examples:

>>> data = [('t3', 0.25, 0.2), ('t3', 0.25, 0.2), ('t3', 0.5, 0.99)]\n>>> columns = ['tag_variant_id', 'relative_sample_size', 'r']\n>>> (\n...    spark.createDataFrame(data, columns)\n...     .withColumn('chr', f.lit('chr1'))\n...     .withColumn('study_id', f.lit('s1'))\n...     .withColumn('variant_id', f.lit('v1'))\n...     .withColumn(\n...         'r_overall',\n...         LDAnnotatorGnomad.weighted_r_overall(\n...             f.col('chr'),\n...             f.col('study_id'),\n...             f.col('variant_id'),\n...             f.col('tag_variant_id'),\n...             f.col('relative_sample_size'),\n...             f.col('r')\n...         )\n...     )\n...     .show()\n... )\n+--------------+--------------------+----+----+--------+----------+---------+\n|tag_variant_id|relative_sample_size|   r| chr|study_id|variant_id|r_overall|\n+--------------+--------------------+----+----+--------+----------+---------+\n|            t3|                0.25| 0.2|chr1|      s1|        v1|    0.595|\n|            t3|                0.25| 0.2|chr1|      s1|        v1|    0.595|\n|            t3|                 0.5|0.99|chr1|      s1|        v1|    0.595|\n+--------------+--------------------+----+----+--------+----------+---------+\n
Source code in src/otg/method/ld.py
@staticmethod\ndef weighted_r_overall(\n    chromosome: Column,\n    study_id: Column,\n    variant_id: Column,\n    tag_variant_id: Column,\n    relative_sample_size: Column,\n    r: Column,\n) -> Column:\n\"\"\"Aggregation of weighted R information using ancestry proportions.\n\n    The method implements a simple average weighted by the relative population sizes.\n\n    Args:\n        chromosome (Column): Chromosome\n        study_id (Column): Study identifier\n        variant_id (Column): Variant identifier\n        tag_variant_id (Column): Tag variant identifier\n        relative_sample_size (Column): Relative sample size\n        r (Column): Correlation\n\n    Returns:\n        Column: Estimates weighted R information\n\n    Examples:\n        >>> data = [('t3', 0.25, 0.2), ('t3', 0.25, 0.2), ('t3', 0.5, 0.99)]\n        >>> columns = ['tag_variant_id', 'relative_sample_size', 'r']\n        >>> (\n        ...    spark.createDataFrame(data, columns)\n        ...     .withColumn('chr', f.lit('chr1'))\n        ...     .withColumn('study_id', f.lit('s1'))\n        ...     .withColumn('variant_id', f.lit('v1'))\n        ...     .withColumn(\n        ...         'r_overall',\n        ...         LDAnnotatorGnomad.weighted_r_overall(\n        ...             f.col('chr'),\n        ...             f.col('study_id'),\n        ...             f.col('variant_id'),\n        ...             f.col('tag_variant_id'),\n        ...             f.col('relative_sample_size'),\n        ...             f.col('r')\n        ...         )\n        ...     )\n        ...     .show()\n        ... )\n        +--------------+--------------------+----+----+--------+----------+---------+\n        |tag_variant_id|relative_sample_size|   r| chr|study_id|variant_id|r_overall|\n        +--------------+--------------------+----+----+--------+----------+---------+\n        |            t3|                0.25| 0.2|chr1|      s1|        v1|    0.595|\n        |            t3|                0.25| 0.2|chr1|      s1|        v1|    0.595|\n        |            t3|                 0.5|0.99|chr1|      s1|        v1|    0.595|\n        +--------------+--------------------+----+----+--------+----------+---------+\n        <BLANKLINE>\n    \"\"\"\n    pseudo_r = f.when(r >= 1, 0.9999995).otherwise(r)\n    return f.round(\n        f.sum(pseudo_r * relative_sample_size).over(\n            Window.partitionBy(chromosome, study_id, variant_id, tag_variant_id)\n        ),\n        6,\n    )\n
"},{"location":"components/method/pics/","title":"PICS","text":"

Probabilistic Identification of Causal SNPs (PICS), an algorithm estimating the probability that an individual variant is causal considering the haplotype structure and observed pattern of association at the genetic locus.

Source code in src/otg/method/pics.py
class PICS:\n\"\"\"Probabilistic Identification of Causal SNPs (PICS), an algorithm estimating the probability that an individual variant is causal considering the haplotype structure and observed pattern of association at the genetic locus.\"\"\"\n\n    @staticmethod\n    def _pics_relative_posterior_probability(\n        neglog_p: float, pics_snp_mu: float, pics_snp_std: float\n    ) -> float:\n\"\"\"Compute the PICS posterior probability for a given SNP.\n\n        !!! info \"This probability needs to be scaled to take into account the probabilities of the other variants in the locus.\"\n\n        Args:\n            neglog_p (float): Negative log p-value of the lead variant\n            pics_snp_mu (float): Mean P value of the association between a SNP and a trait\n            pics_snp_std (float): Standard deviation for the P value of the association between a SNP and a trait\n\n        Returns:\n            Relative posterior probability of a SNP being causal in a locus\n\n        Examples:\n            >>> rel_prob = PICS._pics_relative_posterior_probability(neglog_p=10.0, pics_snp_mu=1.0, pics_snp_std=10.0)\n            >>> round(rel_prob, 3)\n            0.368\n        \"\"\"\n        return float(norm(pics_snp_mu, pics_snp_std).sf(neglog_p) * 2)\n\n    @staticmethod\n    def _pics_standard_deviation(neglog_p: float, r2: float, k: float) -> float | None:\n\"\"\"Compute the PICS standard deviation.\n\n        This distribution is obtained after a series of permutation tests described in the PICS method, and it is only\n        valid when the SNP is highly linked with the lead (r2 > 0.5).\n\n        Args:\n            neglog_p (float): Negative log p-value of the lead variant\n            r2 (float): LD score between a given SNP and the lead variant\n            k (float): Empiric constant that can be adjusted to fit the curve, 6.4 recommended.\n\n        Returns:\n            Standard deviation for the P value of the association between a SNP and a trait\n\n        Examples:\n            >>> PICS._pics_standard_deviation(neglog_p=1.0, r2=1.0, k=6.4)\n            0.0\n            >>> round(PICS._pics_standard_deviation(neglog_p=10.0, r2=0.5, k=6.4), 3)\n            0.143\n            >>> print(PICS._pics_standard_deviation(neglog_p=1.0, r2=0.0, k=6.4))\n            None\n        \"\"\"\n        return (\n            (1 - abs(r2) ** 0.5**k) ** 0.5 * (neglog_p) ** 0.5 / 2\n            if r2 >= 0.5\n            else None\n        )\n\n    @staticmethod\n    def _pics_mu(neglog_p: float, r2: float) -> float | None:\n\"\"\"Compute the PICS mu that estimates the probability of association between a given SNP and the trait.\n\n        This distribution is obtained after a series of permutation tests described in the PICS method, and it is only\n        valid when the SNP is highly linked with the lead (r2 > 0.5).\n\n        Args:\n            neglog_p (float): Negative log p-value of the lead variant\n            r2 (float): LD score between a given SNP and the lead variant\n\n        Returns:\n            Mean P value of the association between a SNP and a trait\n\n        Examples:\n            >>> PICS._pics_mu(neglog_p=1.0, r2=1.0)\n            1.0\n            >>> PICS._pics_mu(neglog_p=10.0, r2=0.5)\n            5.0\n            >>> print(PICS._pics_mu(neglog_p=10.0, r2=0.3))\n            None\n        \"\"\"\n        return neglog_p * r2 if r2 >= 0.5 else None\n\n    @staticmethod\n    def _finemap(\n        credible_set: list[Row], lead_neglog_p: float, k: float\n    ) -> list | 
None:\n\"\"\"Calculates the probability of a variant being causal in a study-locus context by applying the PICS method.\n\n        It is intended to be applied as an UDF in `PICS.finemap`, where each row is a StudyLocus association.\n        The function iterates over every SNP in the `credibleSet` array, and it returns an updated credibleSet with\n        its association signal and causality probability as of PICS.\n\n        Args:\n            credible_set (list): list of tagging variants after expanding the locus\n            lead_neglog_p (float): P value of the association signal between the lead variant and the study in the form of -log10.\n            k (float): Empiric constant that can be adjusted to fit the curve, 6.4 recommended.\n\n        Returns:\n            List of tagging variants with an estimation of the association signal and their posterior probability as of PICS.\n        \"\"\"\n        if credible_set is None:\n            return None\n        elif not credible_set:\n            return []\n\n        tmp_credible_set = []\n        new_credible_set = []\n        # First iteration: calculation of mu, standard deviation, and the relative posterior probability\n        for tag_struct in credible_set:\n            tag_dict = (\n                tag_struct.asDict()\n            )  # tag_struct is of type pyspark.Row, we'll represent it as a dict\n            if (\n                not tag_dict[\"r2Overall\"]\n                or tag_dict[\"r2Overall\"] < 0.5\n                or not lead_neglog_p\n            ):\n                # If PICS cannot be calculated, we'll return the original credible set\n                new_credible_set.append(tag_dict)\n                continue\n            pics_snp_mu = PICS._pics_mu(lead_neglog_p, tag_dict[\"r2Overall\"])\n            pics_snp_std = PICS._pics_standard_deviation(\n                lead_neglog_p, tag_dict[\"r2Overall\"], k\n            )\n            pics_snp_std = 0.001 if pics_snp_std == 0 else pics_snp_std\n            if pics_snp_mu is not None and pics_snp_std is not None:\n                posterior_probability = PICS._pics_relative_posterior_probability(\n                    lead_neglog_p, pics_snp_mu, pics_snp_std\n                )\n                tag_dict[\"tagPValue\"] = 10**-pics_snp_mu\n                tag_dict[\"tagStandardError\"] = 10**-pics_snp_std\n                tag_dict[\"relativePosteriorProbability\"] = posterior_probability\n\n                tmp_credible_set.append(tag_dict)\n\n        # Second iteration: calculation of the sum of all the posteriors in each study-locus, so that we scale them between 0-1\n        total_posteriors = sum(\n            tag_dict.get(\"relativePosteriorProbability\", 0)\n            for tag_dict in tmp_credible_set\n        )\n\n        # Third iteration: calculation of the final posteriorProbability\n        for tag_dict in tmp_credible_set:\n            if total_posteriors != 0:\n                tag_dict[\"posteriorProbability\"] = float(\n                    tag_dict.get(\"relativePosteriorProbability\", 0) / total_posteriors\n                )\n            tag_dict.pop(\"relativePosteriorProbability\")\n            new_credible_set.append(tag_dict)\n        return new_credible_set\n\n    @classmethod\n    def finemap(\n        cls: type[PICS], associations: StudyLocus, k: float = 6.4\n    ) -> StudyLocus:\n\"\"\"Run PICS on a study locus.\n\n        !!! 
info \"Study locus needs to be LD annotated\"\n            The study locus needs to be LD annotated before PICS can be calculated.\n\n        Args:\n            associations (StudyLocus): Study locus to finemap using PICS\n            k (float): Empiric constant that can be adjusted to fit the curve, 6.4 recommended.\n\n        Returns:\n            StudyLocus: Study locus with PICS results\n        \"\"\"\n        # Register UDF by defining the structure of the output credibleSet array of structs\n        credset_schema = t.ArrayType(\n            [field.dataType.elementType for field in associations.schema if field.name == \"credibleSet\"][0]  # type: ignore\n        )\n        _finemap_udf = f.udf(\n            lambda credible_set, neglog_p: PICS._finemap(credible_set, neglog_p, k),\n            credset_schema,\n        )\n\n        associations.df = (\n            associations.df.withColumn(\"neglog_pvalue\", associations.neglog_pvalue())\n            .withColumn(\n                \"credibleSet\",\n                f.when(\n                    f.col(\"credibleSet\").isNotNull(),\n                    _finemap_udf(f.col(\"credibleSet\"), f.col(\"neglog_pvalue\")),\n                ),\n            )\n            .drop(\"neglog_pvalue\")\n        )\n        return associations\n
"},{"location":"components/method/pics/#otg.method.pics.PICS.finemap","title":"finemap(associations, k=6.4) classmethod","text":"

Run PICS on a study locus.

Study locus needs to be LD annotated

The study locus needs to be LD annotated before PICS can be calculated.

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| associations | StudyLocus | Study locus to finemap using PICS | required |
| k | float | Empiric constant that can be adjusted to fit the curve, 6.4 recommended. | 6.4 |

Returns:

| Name | Type | Description |
| --- | --- | --- |
| StudyLocus | StudyLocus | Study locus with PICS results |

Source code in src/otg/method/pics.py
@classmethod\ndef finemap(\n    cls: type[PICS], associations: StudyLocus, k: float = 6.4\n) -> StudyLocus:\n\"\"\"Run PICS on a study locus.\n\n    !!! info \"Study locus needs to be LD annotated\"\n        The study locus needs to be LD annotated before PICS can be calculated.\n\n    Args:\n        associations (StudyLocus): Study locus to finemap using PICS\n        k (float): Empiric constant that can be adjusted to fit the curve, 6.4 recommended.\n\n    Returns:\n        StudyLocus: Study locus with PICS results\n    \"\"\"\n    # Register UDF by defining the structure of the output credibleSet array of structs\n    credset_schema = t.ArrayType(\n        [field.dataType.elementType for field in associations.schema if field.name == \"credibleSet\"][0]  # type: ignore\n    )\n    _finemap_udf = f.udf(\n        lambda credible_set, neglog_p: PICS._finemap(credible_set, neglog_p, k),\n        credset_schema,\n    )\n\n    associations.df = (\n        associations.df.withColumn(\"neglog_pvalue\", associations.neglog_pvalue())\n        .withColumn(\n            \"credibleSet\",\n            f.when(\n                f.col(\"credibleSet\").isNotNull(),\n                _finemap_udf(f.col(\"credibleSet\"), f.col(\"neglog_pvalue\")),\n            ),\n        )\n        .drop(\"neglog_pvalue\")\n    )\n    return associations\n
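As a usage sketch, assuming `study_locus` is an LD-annotated `StudyLocus` (the prerequisite stated above), the method can be chained with credible-set annotation and clumping in the same way the GWAS Catalog step documented later does:

```python
# Sketch only: study_locus is assumed to be an LD-annotated StudyLocus.
finemapped_study_locus = (
    PICS.finemap(study_locus)   # adds tagPValue, tagStandardError and posteriorProbability to the credible set
    .annotate_credible_sets()
    .clump()
)
```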
"},{"location":"components/method/window_based_clumping/","title":"Window-based clumping","text":"

Get semi-lead SNPs from summary statistics using a window-based function.

Source code in src/otg/method/window_based_clumping.py
class WindowBasedClumping:\n\"\"\"Get semi-lead snps from summary statistics using a window based function.\"\"\"\n\n    @staticmethod\n    def _identify_cluster_peaks(\n        study: Column, chromosome: Column, position: Column, window_length: int\n    ) -> Column:\n\"\"\"Cluster GWAS significant variants, were clusters are separated by a defined distance.\n\n        !! Important to note that the length of the clusters can be arbitrarily big.\n\n        Args:\n            study (Column): study identifier\n            chromosome (Column): chromosome identifier\n            position (Column): position of the variant\n            window_length (int): window length in basepair\n\n        Returns:\n            Column: containing cluster identifier\n\n        Examples:\n            >>> data = [\n            ...     # Cluster 1:\n            ...     ('s1', 'chr1', 2),\n            ...     ('s1', 'chr1', 4),\n            ...     ('s1', 'chr1', 12),\n            ...     # Cluster 2 - Same chromosome:\n            ...     ('s1', 'chr1', 31),\n            ...     ('s1', 'chr1', 38),\n            ...     ('s1', 'chr1', 42),\n            ...     # Cluster 3 - New chromosome:\n            ...     ('s1', 'chr2', 41),\n            ...     ('s1', 'chr2', 44),\n            ...     ('s1', 'chr2', 50),\n            ...     # Cluster 4 - other study:\n            ...     ('s2', 'chr2', 55),\n            ...     ('s2', 'chr2', 62),\n            ...     ('s2', 'chr2', 70),\n            ... ]\n            >>> window_length = 10\n            >>> (\n            ...     spark.createDataFrame(data, ['studyId', 'chromosome', 'position'])\n            ...     .withColumn(\"cluster_id\",\n            ...         WindowBasedClumping._identify_cluster_peaks(\n            ...             f.col('studyId'),\n            ...             f.col('chromosome'),\n            ...             f.col('position'),\n            ...             window_length\n            ...         )\n            ...     ).show()\n            ... 
)\n            +-------+----------+--------+----------+\n            |studyId|chromosome|position|cluster_id|\n            +-------+----------+--------+----------+\n            |     s1|      chr1|       2| s1_chr1_2|\n            |     s1|      chr1|       4| s1_chr1_2|\n            |     s1|      chr1|      12| s1_chr1_2|\n            |     s1|      chr1|      31|s1_chr1_31|\n            |     s1|      chr1|      38|s1_chr1_31|\n            |     s1|      chr1|      42|s1_chr1_31|\n            |     s1|      chr2|      41|s1_chr2_41|\n            |     s1|      chr2|      44|s1_chr2_41|\n            |     s1|      chr2|      50|s1_chr2_41|\n            |     s2|      chr2|      55|s2_chr2_55|\n            |     s2|      chr2|      62|s2_chr2_55|\n            |     s2|      chr2|      70|s2_chr2_55|\n            +-------+----------+--------+----------+\n            <BLANKLINE>\n\n        \"\"\"\n        # By adding previous position, the cluster boundary can be identified:\n        previous_position = f.lag(position).over(\n            Window.partitionBy(study, chromosome).orderBy(position)\n        )\n        # We consider a cluster boudary if subsequent snps are further than the defined window:\n        cluster_id = f.when(\n            (previous_position.isNull())\n            | (position - previous_position > window_length),\n            f.concat_ws(\"_\", study, chromosome, position),\n        )\n        # The cluster identifier is propagated across every variant of the cluster:\n        return f.when(\n            cluster_id.isNull(),\n            f.last(cluster_id, ignorenulls=True).over(\n                Window.partitionBy(study, chromosome)\n                .orderBy(position)\n                .rowsBetween(Window.unboundedPreceding, Window.currentRow)\n            ),\n        ).otherwise(cluster_id)\n\n    @staticmethod\n    @f.udf(VectorUDT())\n    def _find_peak(position: ndarray, window_size: int) -> DenseVector:\n\"\"\"Establish lead snps based on their positions listed by p-value.\n\n        The function `find_peak` assigns lead SNPs based on their positions listed by p-value within a specified window size.\n\n        Args:\n            position (ndarray): positions of the SNPs sorted by p-value.\n            window_size (int): the distance in bp within which associations are clumped together around the lead snp.\n\n        Returns:\n            DenseVector: binary vector where 1 indicates a lead SNP and 0 indicates a non-lead SNP.\n\n        Examples:\n            >>> from pyspark.ml import functions as fml\n            >>> data = [\n            ...     ('c', 3, 4.0, True),\n            ...     ('c', 4, 2.0, False),\n            ...     ('c', 6, 1.0, True),\n            ...     ('c', 8, 2.5, False),\n            ...     ('c', 9, 3.0, True)\n            ... ]\n            >>> (\n            ...     spark.createDataFrame(data, ['cluster', 'position', 'negLogPValue', 'isSemiIndex'])\n            ...     .withColumn(\n            ...        'collected_positions',\n            ...         f.collect_list(\n            ...             f.col('position'))\n            ...         .over(\n            ...             Window.partitionBy('cluster')\n            ...             .orderBy(f.col('negLogPValue').desc())\n            ...             .rowsBetween(Window.unboundedPreceding, Window.unboundedFollowing)\n            ...         )\n            ...     )\n            ...     
.withColumn('isLeadList', WindowBasedClumping._find_peak(fml.array_to_vector(f.col('collected_positions')), f.lit(2)))\n            ...     .show(truncate=False)\n            ... )\n            +-------+--------+------------+-----------+-------------------+---------------------+\n            |cluster|position|negLogPValue|isSemiIndex|collected_positions|isLeadList           |\n            +-------+--------+------------+-----------+-------------------+---------------------+\n            |c      |3       |4.0         |true       |[3, 9, 8, 4, 6]    |[1.0,1.0,0.0,0.0,1.0]|\n            |c      |9       |3.0         |true       |[3, 9, 8, 4, 6]    |[1.0,1.0,0.0,0.0,1.0]|\n            |c      |8       |2.5         |false      |[3, 9, 8, 4, 6]    |[1.0,1.0,0.0,0.0,1.0]|\n            |c      |4       |2.0         |false      |[3, 9, 8, 4, 6]    |[1.0,1.0,0.0,0.0,1.0]|\n            |c      |6       |1.0         |true       |[3, 9, 8, 4, 6]    |[1.0,1.0,0.0,0.0,1.0]|\n            +-------+--------+------------+-----------+-------------------+---------------------+\n            <BLANKLINE>\n\n        \"\"\"\n        # Initializing the lead list with zeroes:\n        is_lead: ndarray = np.zeros(len(position))\n\n        # List containing indices of leads:\n        lead_indices: list = []\n\n        # Looping through all positions:\n        for index in range(len(position)):\n            # Looping through leads to find out if they are within a window:\n            for lead_index in lead_indices:\n                # If any of the leads within the window:\n                if abs(position[lead_index] - position[index]) < window_size:\n                    # Skipping further checks:\n                    break\n            else:\n                # None of the leads were within the window:\n                lead_indices.append(index)\n                is_lead[index] = 1\n\n        return DenseVector(is_lead)\n\n    @staticmethod\n    def _filter_leads(clump: Column, window_length: int) -> Column:\n\"\"\"Filter lead snps from a column containing clumps with prioritised variants.\n\n        Args:\n            clump (Column): column containing array of structs with all variants in the clump sorted by priority.\n            window_length (int): window length in basepair\n\n        Returns:\n            Column: column containing array of structs with only lead variants.\n\n        Examples:\n            >>> data = [\n            ...     ('v6', 10),\n            ...     ('v4', 6),\n            ...     ('v1', 3),\n            ...     ('v2', 4),\n            ...     ('v3', 5),\n            ...     ('v5', 8),\n            ...     ('v7', 13),\n            ...     ('v8', 20)\n            ... ]\n            >>> window_length = 2\n            >>> (\n            ...    spark.createDataFrame(data, ['variantId', 'position']).withColumn(\"study\", f.lit(\"s1\"))\n            ...    .groupBy(\"study\")\n            ...    .agg(f.collect_list(f.struct(\"*\")).alias(\"clump\"))\n            ...    .select(WindowBasedClumping._filter_leads(f.col('clump'), window_length).alias(\"filtered_clump\"))\n            ...    .show(truncate=False)\n            ... 
)\n            +---------------------------------------------------------------------------------------------------------------+\n            |filtered_clump                                                                                                 |\n            +---------------------------------------------------------------------------------------------------------------+\n            |[{v6, 10, s1, 1.0}, {v4, 6, s1, 1.0}, {v1, 3, s1, 1.0}, {v5, 8, s1, 1.0}, {v7, 13, s1, 1.0}, {v8, 20, s1, 1.0}]|\n            +---------------------------------------------------------------------------------------------------------------+\n            <BLANKLINE>\n\n        \"\"\"\n        # Combine the lead position vector with the aggregated fields and dropping non-lead snps:\n        return f.filter(\n            f.zip_with(\n                clump,\n                # Extract the position vector and identify positions of the leads:\n                fml.vector_to_array(\n                    WindowBasedClumping._find_peak(\n                        fml.array_to_vector(f.transform(clump, lambda x: x.position)),\n                        f.lit(window_length),\n                    )\n                ),\n                lambda x, y: f.when(y == 1.0, x.withField(\"isLead\", y)),\n            ),\n            lambda col: col.isNotNull(),\n        )\n\n    @staticmethod\n    def _collect_clump(mantissa: Column, exponent: Column) -> Column:\n\"\"\"Collect clump into a sorted struct.\n\n        Args:\n            mantissa (Column): mantissa of the p-value\n            exponent (Column): exponent of the p-value\n\n        Returns:\n            Column: struct containing clumped variants sorted by negLogPValue in descending order\n\n        Examples:\n            >>> data = [\n            ...     ('clump_1', 2, 0.1, -1),\n            ...     ('clump_1', 4, 0.2, -1),\n            ...     ('clump_1', 12, 0.3, -1),\n            ...     ('clump_1', 31, 0.4, -1),\n            ...     ('clump_1', 38, 0.5, -1),\n            ...     ('clump_1', 42, 0.6, -1),\n            ...     ('clump_2', 41, 0.7, -1),\n            ...     ('clump_2', 44, 0.8, -1),\n            ...     ('clump_2', 50, 0.9, -1),\n            ...     ('clump_3', 55, 1.0, -1),\n            ...     ('clump_3', 62, 1.1, -1),\n            ...     ('clump_3', 70, 1.2, -1),\n            ... ]\n            >>> (\n            ...    spark.createDataFrame(data, ['clump_id', 'position', 'pValueMantissa', 'pValueExponent'])\n            ...     .groupBy('clump_id')\n            ...     .agg(WindowBasedClumping._collect_clump(\n            ...                 f.col('pValueMantissa'),\n            ...                 f.col('pValueExponent')\n            ...             ).alias(\"clump\")\n            ...     ).show(truncate=False)\n            ... 
)\n            +--------+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+\n            |clump_id|clump                                                                                                                                                                                                                                                  |\n            +--------+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+\n            |clump_1 |[{2.0, clump_1, 2, 0.1, -1}, {1.6989700043360187, clump_1, 4, 0.2, -1}, {1.5228787452803376, clump_1, 12, 0.3, -1}, {1.3979400086720375, clump_1, 31, 0.4, -1}, {1.3010299956639813, clump_1, 38, 0.5, -1}, {1.2218487496163564, clump_1, 42, 0.6, -1}]|\n            |clump_2 |[{1.154901959985743, clump_2, 41, 0.7, -1}, {1.0969100130080565, clump_2, 44, 0.8, -1}, {1.045757490560675, clump_2, 50, 0.9, -1}]                                                                                                                     |\n            |clump_3 |[{1.0, clump_3, 55, 1.0, -1}, {0.958607314841775, clump_3, 62, 1.1, -1}, {0.9208187539523752, clump_3, 70, 1.2, -1}]                                                                                                                                   |\n            +--------+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+\n            <BLANKLINE>\n\n        \"\"\"\n        return f.sort_array(\n            f.collect_list(\n                f.struct(\n                    calculate_neglog_pvalue(mantissa, exponent).alias(\"negLogPValue\"),\n                    \"*\",\n                )\n            ),\n            False,\n        )\n\n    @classmethod\n    def clump(\n        cls: type[WindowBasedClumping],\n        summary_stats: SummaryStatistics,\n        window_length: int,\n    ) -> StudyLocus:\n\"\"\"Clump summary statistics by distance.\n\n        Args:\n            summary_stats (SummaryStatistics): summary statistics to clump\n            window_length (int): window length in basepair\n\n        Returns:\n            StudyLocus: clumped summary statistics\n        \"\"\"\n        return StudyLocus(\n            _df=summary_stats.df.withColumn(\n                \"cluster_id\",\n                # First identify clusters of variants within the window\n                WindowBasedClumping._identify_cluster_peaks(\n                    f.col(\"studyId\"),\n                    f.col(\"chromosome\"),\n                    f.col(\"position\"),\n                    window_length,\n                ),\n            )\n            .groupBy(\"cluster_id\")\n            # Aggregating all data from each cluster:\n            .agg(\n                WindowBasedClumping._collect_clump(\n                    f.col(\"pValueMantissa\"), f.col(\"pValueExponent\")\n                ).alias(\"clump\")\n            )\n            # Explode and identify the index variant representative of the cluster:\n            .withColumn(\n                \"exploded\",\n      
          f.explode(\n                    WindowBasedClumping._filter_leads(f.col(\"clump\"), window_length)\n                ),\n            )\n            .select(\"exploded.*\")\n            # Dropping helper columns:\n            .drop(\"isLead\", \"negLogPValue\", \"cluster_id\")\n            # assign study-locus id:\n            .withColumn(\"studyLocusId\", get_study_locus_id(\"studyId\", \"variantId\"))\n        )\n
"},{"location":"components/method/window_based_clumping/#otg.method.window_based_clumping.WindowBasedClumping.clump","title":"clump(summary_stats, window_length) classmethod","text":"

Clump summary statistics by distance.

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| summary_stats | SummaryStatistics | summary statistics to clump | required |
| window_length | int | window length in base pairs | required |

Returns:

| Name | Type | Description |
| --- | --- | --- |
| StudyLocus | StudyLocus | clumped summary statistics |

Source code in src/otg/method/window_based_clumping.py
@classmethod\ndef clump(\n    cls: type[WindowBasedClumping],\n    summary_stats: SummaryStatistics,\n    window_length: int,\n) -> StudyLocus:\n\"\"\"Clump summary statistics by distance.\n\n    Args:\n        summary_stats (SummaryStatistics): summary statistics to clump\n        window_length (int): window length in basepair\n\n    Returns:\n        StudyLocus: clumped summary statistics\n    \"\"\"\n    return StudyLocus(\n        _df=summary_stats.df.withColumn(\n            \"cluster_id\",\n            # First identify clusters of variants within the window\n            WindowBasedClumping._identify_cluster_peaks(\n                f.col(\"studyId\"),\n                f.col(\"chromosome\"),\n                f.col(\"position\"),\n                window_length,\n            ),\n        )\n        .groupBy(\"cluster_id\")\n        # Aggregating all data from each cluster:\n        .agg(\n            WindowBasedClumping._collect_clump(\n                f.col(\"pValueMantissa\"), f.col(\"pValueExponent\")\n            ).alias(\"clump\")\n        )\n        # Explode and identify the index variant representative of the cluster:\n        .withColumn(\n            \"exploded\",\n            f.explode(\n                WindowBasedClumping._filter_leads(f.col(\"clump\"), window_length)\n            ),\n        )\n        .select(\"exploded.*\")\n        # Dropping helper columns:\n        .drop(\"isLead\", \"negLogPValue\", \"cluster_id\")\n        # assign study-locus id:\n        .withColumn(\"studyLocusId\", get_study_locus_id(\"studyId\", \"variantId\"))\n    )\n
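A minimal usage sketch, assuming `summary_stats` is a `SummaryStatistics` dataset; the 500 kb window below is purely illustrative, not a pipeline default:

```python
# Sketch: clump single-point associations into study-loci within a 500 kb window.
clumped_study_locus = WindowBasedClumping.clump(
    summary_stats,          # assumed SummaryStatistics instance
    window_length=500_000,  # illustrative window length in base pairs (assumption)
)
```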
"},{"location":"components/step/colocalisation/","title":"Colocalisation","text":"

Bases: ColocalisationStepConfig

Colocalisation step.

This workflow runs colocalization analyses that assess the degree to which independent signals of the association share the same causal variant in a region of the genome, typically limited by linkage disequilibrium (LD).

Source code in src/otg/colocalisation.py
@dataclass\nclass ColocalisationStep(ColocalisationStepConfig):\n\"\"\"Colocalisation step.\n\n    This workflow runs colocalization analyses that assess the degree to which independent signals of the association share the same causal variant in a region of the genome, typically limited by linkage disequilibrium (LD).\n    \"\"\"\n\n    session: Session = Session()\n\n    def run(self: ColocalisationStep) -> None:\n\"\"\"Run colocalisation step.\"\"\"\n        # Study-locus information\n        sl = StudyLocus.from_parquet(self.session, self.study_locus_path)\n        si = StudyIndex.from_parquet(self.session, self.study_index_path)\n\n        # Study-locus overlaps for 95% credible sets\n        sl_overlaps = sl.credible_set(CredibleInterval.IS95).overlaps(si)\n\n        coloc_results = Coloc.colocalise(\n            sl_overlaps, self.priorc1, self.priorc2, self.priorc12\n        )\n        ecaviar_results = ECaviar.colocalise(sl_overlaps)\n\n        coloc_results.df.unionByName(ecaviar_results.df, allowMissingColumns=True)\n\n        coloc_results.df.write.mode(self.session.write_mode).parquet(self.coloc_path)\n

Colocalisation step requirements.

Attributes:

| Name | Type | Description |
| --- | --- | --- |
| study_locus_path | DictConfig | Input Study-locus path. |
| coloc_path | DictConfig | Output Colocalisation path. |
| priorc1 | float | Prior on variant being causal for trait 1. |
| priorc2 | float | Prior on variant being causal for trait 2. |
| priorc12 | float | Prior on variant being causal for traits 1 and 2. |

Source code in src/otg/config.py
@dataclass\nclass ColocalisationStepConfig:\n\"\"\"Colocalisation step requirements.\n\n    Attributes:\n        study_locus_path (DictConfig): Input Study-locus path.\n        coloc_path (DictConfig): Output Colocalisation path.\n        priorc1 (float): Prior on variant being causal for trait 1.\n        priorc2 (float): Prior on variant being causal for trait 2.\n        priorc12 (float): Prior on variant being causal for traits 1 and 2.\n    \"\"\"\n\n    _target_: str = \"otg.colocalisation.ColocalisationStep\"\n    study_locus_path: str = MISSING\n    study_index_path: str = MISSING\n    coloc_path: str = MISSING\n    priorc1: float = 1e-4\n    priorc2: float = 1e-4\n    priorc12: float = 1e-5\n
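A minimal sketch of running the step directly, with every path a placeholder (in practice these values come from the step configuration rather than being hard-coded):

```python
# Hypothetical direct invocation; all paths below are placeholders.
step = ColocalisationStep(
    study_locus_path="gs://<bucket>/study_locus",
    study_index_path="gs://<bucket>/study_index",
    coloc_path="gs://<bucket>/colocalisation",
    # priorc1, priorc2 and priorc12 keep their defaults (1e-4, 1e-4 and 1e-5)
)
step.run()
```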
"},{"location":"components/step/colocalisation/#otg.colocalisation.ColocalisationStep.run","title":"run()","text":"

Run colocalisation step.

Source code in src/otg/colocalisation.py
def run(self: ColocalisationStep) -> None:\n\"\"\"Run colocalisation step.\"\"\"\n    # Study-locus information\n    sl = StudyLocus.from_parquet(self.session, self.study_locus_path)\n    si = StudyIndex.from_parquet(self.session, self.study_index_path)\n\n    # Study-locus overlaps for 95% credible sets\n    sl_overlaps = sl.credible_set(CredibleInterval.IS95).overlaps(si)\n\n    coloc_results = Coloc.colocalise(\n        sl_overlaps, self.priorc1, self.priorc2, self.priorc12\n    )\n    ecaviar_results = ECaviar.colocalise(sl_overlaps)\n\n    coloc_results.df.unionByName(ecaviar_results.df, allowMissingColumns=True)\n\n    coloc_results.df.write.mode(self.session.write_mode).parquet(self.coloc_path)\n
"},{"location":"components/step/finngen/","title":"FinnGen","text":"

Bases: FinnGenStepConfig

FinnGen study table ingestion step.

Source code in src/otg/finngen.py
@dataclass\nclass FinnGenStep(FinnGenStepConfig):\n\"\"\"FinnGen study table ingestion step.\"\"\"\n\n    session: Session = Session()\n\n    def run(self: FinnGenStep) -> None:\n\"\"\"Run FinnGen study table ingestion step.\"\"\"\n        # Read the JSON data from the URL.\n        json_data = urlopen(self.finngen_phenotype_table_url).read().decode(\"utf-8\")\n        rdd = self.session.spark.sparkContext.parallelize([json_data])\n        df = self.session.spark.read.json(rdd)\n\n        # Parse the study index data.\n        finngen_studies = StudyIndexFinnGen.from_source(\n            df,\n            self.finngen_release_prefix,\n            self.finngen_sumstat_url_prefix,\n            self.finngen_sumstat_url_suffix,\n        )\n\n        # Write the output.\n        finngen_studies.df.write.mode(self.session.write_mode).parquet(\n            self.finngen_study_index_out\n        )\n

FinnGen study table ingestion step requirements.

Attributes:

| Name | Type | Description |
| --- | --- | --- |
| finngen_phenotype_table_url | str | FinnGen API for fetching the list of studies. |
| finngen_release_prefix | str | Release prefix pattern. |
| finngen_sumstat_url_prefix | str | URL prefix for summary statistics location. |
| finngen_sumstat_url_suffix | str | URL suffix for summary statistics location. |
| finngen_study_index_out | str | Output path for the FinnGen study index dataset. |

Source code in src/otg/config.py
@dataclass\nclass FinnGenStepConfig:\n\"\"\"FinnGen study table ingestion step requirements.\n\n    Attributes:\n        finngen_phenotype_table_url (str): FinnGen API for fetching the list of studies.\n        finngen_release_prefix (str): Release prefix pattern.\n        finngen_sumstat_url_prefix (str): URL prefix for summary statistics location.\n        finngen_sumstat_url_suffix (str): URL prefix suffix for summary statistics location.\n        finngen_study_index_out (str): Output path for the FinnGen study index dataset.\n    \"\"\"\n\n    _target_: str = \"otg.finngen.FinnGenStep\"\n    finngen_phenotype_table_url: str = MISSING\n    finngen_release_prefix: str = MISSING\n    finngen_sumstat_url_prefix: str = MISSING\n    finngen_sumstat_url_suffix: str = MISSING\n    finngen_study_index_out: str = MISSING\n
"},{"location":"components/step/finngen/#otg.finngen.FinnGenStep.run","title":"run()","text":"

Run FinnGen study table ingestion step.

Source code in src/otg/finngen.py
def run(self: FinnGenStep) -> None:\n\"\"\"Run FinnGen study table ingestion step.\"\"\"\n    # Read the JSON data from the URL.\n    json_data = urlopen(self.finngen_phenotype_table_url).read().decode(\"utf-8\")\n    rdd = self.session.spark.sparkContext.parallelize([json_data])\n    df = self.session.spark.read.json(rdd)\n\n    # Parse the study index data.\n    finngen_studies = StudyIndexFinnGen.from_source(\n        df,\n        self.finngen_release_prefix,\n        self.finngen_sumstat_url_prefix,\n        self.finngen_sumstat_url_suffix,\n    )\n\n    # Write the output.\n    finngen_studies.df.write.mode(self.session.write_mode).parquet(\n        self.finngen_study_index_out\n    )\n
"},{"location":"components/step/gene_index/","title":"Gene index","text":"

Bases: GeneIndexStepConfig

Gene index step.

This step generates a gene index dataset from an Open Targets Platform target dataset.

Source code in src/otg/gene_index.py
@dataclass\nclass GeneIndexStep(GeneIndexStepConfig):\n\"\"\"Gene index step.\n\n    This step generates a gene index dataset from an Open Targets Platform target dataset.\n    \"\"\"\n\n    session: Session = Session()\n\n    def run(self: GeneIndexStep) -> None:\n\"\"\"Run Target index step.\"\"\"\n        # Extract\n        platform_target = self.session.spark.read.parquet(self.target_path)\n        # Transform\n        gene_index = GeneIndex.from_source(platform_target)\n        # Load\n        gene_index.df.write.mode(self.session.write_mode).parquet(self.gene_index_path)\n

Gene index step requirements.

Attributes:

| Name | Type | Description |
| --- | --- | --- |
| target_path | str | Open Targets Platform target dataset path. |
| gene_index_path | str | Output gene index path. |

Source code in src/otg/config.py
@dataclass\nclass GeneIndexStepConfig:\n\"\"\"Gene index step requirements.\n\n    Attributes:\n        target_path (str): Open targets Platform target dataset path.\n        gene_index_path (str): Output gene index path.\n    \"\"\"\n\n    _target_: str = \"otg.gene_index.GeneIndexStep\"\n    target_path: str = MISSING\n    gene_index_path: str = MISSING\n
"},{"location":"components/step/gene_index/#otg.gene_index.GeneIndexStep.run","title":"run()","text":"

Run Target index step.

Source code in src/otg/gene_index.py
def run(self: GeneIndexStep) -> None:\n\"\"\"Run Target index step.\"\"\"\n    # Extract\n    platform_target = self.session.spark.read.parquet(self.target_path)\n    # Transform\n    gene_index = GeneIndex.from_source(platform_target)\n    # Load\n    gene_index.df.write.mode(self.session.write_mode).parquet(self.gene_index_path)\n
"},{"location":"components/step/gwas_catalog/","title":"GWAS Catalog","text":"

Bases: GWASCatalogStepConfig

GWAS Catalog step.

Source code in src/otg/gwas_catalog.py
@dataclass\nclass GWASCatalogStep(GWASCatalogStepConfig):\n\"\"\"GWAS Catalog step.\"\"\"\n\n    session: Session = Session()\n\n    def run(self: GWASCatalogStep) -> None:\n\"\"\"Run GWAS Catalog ingestion step to extract GWASCatalog Study and StudyLocus tables.\"\"\"\n        hl.init(sc=self.session.spark.sparkContext, log=\"/dev/null\")\n        # All inputs:\n        # Variant annotation dataset\n        va = VariantAnnotation.from_parquet(self.session, self.variant_annotation_path)\n        # GWAS Catalog raw study information\n        catalog_studies = self.session.spark.read.csv(\n            self.catalog_studies_file, sep=\"\\t\", header=True\n        )\n        # GWAS Catalog ancestry information\n        ancestry_lut = self.session.spark.read.csv(\n            self.catalog_ancestry_file, sep=\"\\t\", header=True\n        )\n        # GWAS Catalog summary statistics information\n        sumstats_lut = self.session.spark.read.csv(\n            self.catalog_sumstats_lut, sep=\"\\t\", header=False\n        )\n        # GWAS Catalog raw association information\n        catalog_associations = self.session.spark.read.csv(\n            self.catalog_associations_file, sep=\"\\t\", header=True\n        )\n\n        # Transform:\n        # GWAS Catalog study index and study-locus splitted\n        study_index, study_locus = GWASCatalogSplitter.split(\n            StudyIndexGWASCatalog.from_source(\n                catalog_studies, ancestry_lut, sumstats_lut\n            ),\n            StudyLocusGWASCatalog.from_source(catalog_associations, va),\n        )\n\n        # Annotate LD information\n        study_locus = study_locus.annotate_ld(\n            self.session,\n            study_index,\n            self.ld_populations,\n            self.ld_index_template,\n            self.ld_matrix_template,\n            self.min_r2,\n        )\n\n        # Fine-mapping LD-clumped study-locus using PICS\n        finemapped_study_locus = (\n            PICS.finemap(study_locus).annotate_credible_sets().clump()\n        )\n\n        # Write:\n        study_index.df.write.mode(self.session.write_mode).parquet(\n            self.catalog_studies_out\n        )\n        finemapped_study_locus.df.write.mode(self.session.write_mode).parquet(\n            self.catalog_associations_out\n        )\n

GWAS Catalog step requirements.

Attributes:

| Name | Type | Description |
| --- | --- | --- |
| catalog_studies_file | str | Raw GWAS catalog studies file. |
| catalog_ancestry_file | str | Ancestry annotations file from GWAS Catalog. |
| catalog_sumstats_lut | str | GWAS Catalog summary statistics lookup table. |
| catalog_associations_file | str | Raw GWAS catalog associations file. |
| variant_annotation_path | str | Input variant annotation path. |
| ld_populations | list | List of populations to include. |
| min_r2 | float | Minimum r2 to keep when considering variants within a window. |
| catalog_studies_out | str | Output GWAS catalog studies path. |
| catalog_associations_out | str | Output GWAS catalog associations path. |

Source code in src/otg/config.py
@dataclass\nclass GWASCatalogStepConfig:\n\"\"\"GWAS Catalog step requirements.\n\n    Attributes:\n        catalog_studies_file (str): Raw GWAS catalog studies file.\n        catalog_ancestry_file (str): Ancestry annotations file from GWAS Catalog.\n        catalog_sumstats_lut (str): GWAS Catalog summary statistics lookup table.\n        catalog_associations_file (str): Raw GWAS catalog associations file.\n        variant_annotation_path (str): Input variant annotation path.\n        ld_populations (list): List of populations to include.\n        min_r2 (float): Minimum r2 to consider when considering variants within a window.\n        catalog_studies_out (str): Output GWAS catalog studies path.\n        catalog_associations_out (str): Output GWAS catalog associations path.\n    \"\"\"\n\n    _target_: str = \"otg.gwas_catalog.GWASCatalogStep\"\n    catalog_studies_file: str = MISSING\n    catalog_ancestry_file: str = MISSING\n    catalog_sumstats_lut: str = MISSING\n    catalog_associations_file: str = MISSING\n    variant_annotation_path: str = MISSING\n    min_r2: float = 0.5\n    ld_matrix_template: str = MISSING\n    ld_index_template: str = MISSING\n    ld_populations: List[str] = field(\n        default_factory=lambda: [\n            \"afr\",  # African-American\n            \"amr\",  # American Admixed/Latino\n            \"asj\",  # Ashkenazi Jewish\n            \"eas\",  # East Asian\n            \"fin\",  # Finnish\n            \"nfe\",  # Non-Finnish European\n            \"nwe\",  # Northwestern European\n            \"seu\",  # Southeastern European\n        ]\n    )\n    catalog_studies_out: str = MISSING\n    catalog_associations_out: str = MISSING\n
"},{"location":"components/step/gwas_catalog/#otg.gwas_catalog.GWASCatalogStep.run","title":"run()","text":"

Run GWAS Catalog ingestion step to extract GWASCatalog Study and StudyLocus tables.

Source code in src/otg/gwas_catalog.py
def run(self: GWASCatalogStep) -> None:\n\"\"\"Run GWAS Catalog ingestion step to extract GWASCatalog Study and StudyLocus tables.\"\"\"\n    hl.init(sc=self.session.spark.sparkContext, log=\"/dev/null\")\n    # All inputs:\n    # Variant annotation dataset\n    va = VariantAnnotation.from_parquet(self.session, self.variant_annotation_path)\n    # GWAS Catalog raw study information\n    catalog_studies = self.session.spark.read.csv(\n        self.catalog_studies_file, sep=\"\\t\", header=True\n    )\n    # GWAS Catalog ancestry information\n    ancestry_lut = self.session.spark.read.csv(\n        self.catalog_ancestry_file, sep=\"\\t\", header=True\n    )\n    # GWAS Catalog summary statistics information\n    sumstats_lut = self.session.spark.read.csv(\n        self.catalog_sumstats_lut, sep=\"\\t\", header=False\n    )\n    # GWAS Catalog raw association information\n    catalog_associations = self.session.spark.read.csv(\n        self.catalog_associations_file, sep=\"\\t\", header=True\n    )\n\n    # Transform:\n    # GWAS Catalog study index and study-locus splitted\n    study_index, study_locus = GWASCatalogSplitter.split(\n        StudyIndexGWASCatalog.from_source(\n            catalog_studies, ancestry_lut, sumstats_lut\n        ),\n        StudyLocusGWASCatalog.from_source(catalog_associations, va),\n    )\n\n    # Annotate LD information\n    study_locus = study_locus.annotate_ld(\n        self.session,\n        study_index,\n        self.ld_populations,\n        self.ld_index_template,\n        self.ld_matrix_template,\n        self.min_r2,\n    )\n\n    # Fine-mapping LD-clumped study-locus using PICS\n    finemapped_study_locus = (\n        PICS.finemap(study_locus).annotate_credible_sets().clump()\n    )\n\n    # Write:\n    study_index.df.write.mode(self.session.write_mode).parquet(\n        self.catalog_studies_out\n    )\n    finemapped_study_locus.df.write.mode(self.session.write_mode).parquet(\n        self.catalog_associations_out\n    )\n
"},{"location":"components/step/gwas_catalog_sumstat_preprocess/","title":"GWAS Catalog sumstat preprocess","text":"

Bases: GWASCatalogSumstatsPreprocessConfig

Step to preprocess GWAS Catalog harmonised summary stats.

Source code in src/otg/gwas_catalog_sumstat_preprocess.py
@dataclass\nclass GWASCatalogSumstatsPreprocessStep(GWASCatalogSumstatsPreprocessConfig):\n\"\"\"Step to preprocess GWAS Catalog harmonised summary stats.\"\"\"\n\n    session: Session = Session()\n\n    def run(self: GWASCatalogSumstatsPreprocessStep) -> None:\n\"\"\"Run Step.\"\"\"\n        # Extract\n        self.session.logger.info(self.raw_sumstats_path)\n        self.session.logger.info(self.out_sumstats_path)\n        self.session.logger.info(self.study_id)\n\n        # Reading dataset:\n        raw_dataset = self.session.spark.read.csv(\n            self.raw_sumstats_path, header=True, sep=\"\\t\"\n        )\n        self.session.logger.info(\n            f\"Number of single point associations: {raw_dataset.count()}\"\n        )\n\n        # Processing dataset:\n        SummaryStatistics.from_gwas_harmonized_summary_stats(\n            raw_dataset, self.study_id\n        ).df.write.mode(self.session.write_mode).parquet(self.out_sumstats_path)\n        self.session.logger.info(\"Processing dataset successfully completed.\")\n

GWAS Catalog Sumstats Preprocessing step requirements.

Attributes:

| Name | Type | Description |
| --- | --- | --- |
| raw_sumstats_path | str | Input raw GWAS Catalog summary statistics path. |
| out_sumstats_path | str | Output GWAS Catalog summary statistics path. |
| study_id | str | GWAS Catalog study identifier. |

Source code in src/otg/config.py
@dataclass\nclass GWASCatalogSumstatsPreprocessConfig:\n\"\"\"GWAS Catalog Sumstats Preprocessing step requirements.\n\n    Attributes:\n        raw_sumstats_path (str): Input raw GWAS Catalog summary statistics path.\n        out_sumstats_path (str): Output GWAS Catalog summary statistics path.\n        study_id (str): GWAS Catalog study identifier.\n    \"\"\"\n\n    _target_: str = (\n        \"otg.gwas_catalog_sumstat_preprocess.GWASCatalogSumstatsPreprocessStep\"\n    )\n    raw_sumstats_path: str = MISSING\n    out_sumstats_path: str = MISSING\n    study_id: str = MISSING\n
"},{"location":"components/step/gwas_catalog_sumstat_preprocess/#otg.gwas_catalog_sumstat_preprocess.GWASCatalogSumstatsPreprocessStep.run","title":"run()","text":"

Run Step.

Source code in src/otg/gwas_catalog_sumstat_preprocess.py
def run(self: GWASCatalogSumstatsPreprocessStep) -> None:\n\"\"\"Run Step.\"\"\"\n    # Extract\n    self.session.logger.info(self.raw_sumstats_path)\n    self.session.logger.info(self.out_sumstats_path)\n    self.session.logger.info(self.study_id)\n\n    # Reading dataset:\n    raw_dataset = self.session.spark.read.csv(\n        self.raw_sumstats_path, header=True, sep=\"\\t\"\n    )\n    self.session.logger.info(\n        f\"Number of single point associations: {raw_dataset.count()}\"\n    )\n\n    # Processing dataset:\n    SummaryStatistics.from_gwas_harmonized_summary_stats(\n        raw_dataset, self.study_id\n    ).df.write.mode(self.session.write_mode).parquet(self.out_sumstats_path)\n    self.session.logger.info(\"Processing dataset successfully completed.\")\n
"},{"location":"components/step/ld_index/","title":"LD index","text":"

Bases: LDIndexStepConfig

LD index step.

Source code in src/otg/ld_index.py
@dataclass\nclass LDIndexStep(LDIndexStepConfig):\n\"\"\"LD index step.\"\"\"\n\n    session: Session = Session()\n\n    def run(self: LDIndexStep) -> None:\n\"\"\"Run LD index step.\"\"\"\n        # init hail session\n        hl.init(sc=self.session.spark.sparkContext, log=\"/dev/null\")\n\n        for population in self.ld_populations:\n            self.session.logger.info(f\"Processing population: {population}\")\n            ld_index = LDIndex.create(\n                self.ld_index_raw_template.format(POP=population),\n                self.ld_radius,\n                self.grch37_to_grch38_chain_path,\n            )\n\n            self.session.logger.info(\n                f\"Writing ls index to: {self.ld_index_template.format(POP=population)}\"\n            )\n            (\n                ld_index.df.write.partitionBy(\"chromosome\")\n                .mode(self.session.write_mode)\n                .parquet(self.ld_index_template.format(POP=population))  # noqa: FS002\n            )\n

LD index step requirements.

Attributes:

| Name | Type | Description |
| --- | --- | --- |
| pop_ldindex_path | str | Input population LD index file from gnomAD. |
| ld_radius | int | Window radius around locus. |
| grch37_to_grch38_chain_path | str | Path to GRCh37 to GRCh38 chain file. |
| ld_index_path | str | Output LD index path. |

Source code in src/otg/config.py
@dataclass\nclass LDIndexStepConfig:\n\"\"\"LD index step requirements.\n\n    Attributes:\n        pop_ldindex_path (str): Input population LD index file from gnomAD.\n        ld_radius (int): Window radius around locus.\n        grch37_to_grch38_chain_path (str): Path to GRCh37 to GRCh38 chain file.\n        ld_index_path (str): Output LD index path.\n    \"\"\"\n\n    _target_: str = \"otg.ld_index.LDIndexStep\"\n    ld_index_raw_template: str = \"gs://gcp-public-data--gnomad/release/2.1.1/ld/gnomad.genomes.r2.1.1.{POP}.common.ld.variant_indices.ht\"\n    ld_radius: int = 500_000\n    grch37_to_grch38_chain_path: str = MISSING\n    ld_index_template: str = MISSING\n    ld_populations: List[str] = field(\n        default_factory=lambda: [\n            \"afr\",  # African-American\n            \"amr\",  # American Admixed/Latino\n            \"asj\",  # Ashkenazi Jewish\n            \"eas\",  # East Asian\n            \"fin\",  # Finnish\n            \"nfe\",  # Non-Finnish European\n            \"nwe\",  # Northwestern European\n            \"seu\",  # Southeastern European\n        ]\n    )\n
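To illustrate how the `{POP}` placeholder in the templates is resolved, using the raw gnomAD template shown in the config above (the chosen population code is just an example):

```python
ld_index_raw_template = (
    "gs://gcp-public-data--gnomad/release/2.1.1/ld/"
    "gnomad.genomes.r2.1.1.{POP}.common.ld.variant_indices.ht"
)
# Resolving the template for the Non-Finnish European population ("nfe"):
print(ld_index_raw_template.format(POP="nfe"))
# gs://gcp-public-data--gnomad/release/2.1.1/ld/gnomad.genomes.r2.1.1.nfe.common.ld.variant_indices.ht
```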
"},{"location":"components/step/ld_index/#otg.ld_index.LDIndexStep.run","title":"run()","text":"

Run LD index step.

Source code in src/otg/ld_index.py
def run(self: LDIndexStep) -> None:\n\"\"\"Run LD index step.\"\"\"\n    # init hail session\n    hl.init(sc=self.session.spark.sparkContext, log=\"/dev/null\")\n\n    for population in self.ld_populations:\n        self.session.logger.info(f\"Processing population: {population}\")\n        ld_index = LDIndex.create(\n            self.ld_index_raw_template.format(POP=population),\n            self.ld_radius,\n            self.grch37_to_grch38_chain_path,\n        )\n\n        self.session.logger.info(\n            f\"Writing ls index to: {self.ld_index_template.format(POP=population)}\"\n        )\n        (\n            ld_index.df.write.partitionBy(\"chromosome\")\n            .mode(self.session.write_mode)\n            .parquet(self.ld_index_template.format(POP=population))  # noqa: FS002\n        )\n
"},{"location":"components/step/ukbiobank/","title":"UKBiobank","text":"

Bases: UKBiobankStepConfig

UKBiobank study table ingestion step.

Source code in src/otg/ukbiobank.py
@dataclass\nclass UKBiobankStep(UKBiobankStepConfig):\n\"\"\"UKBiobank study table ingestion step.\"\"\"\n\n    session: Session = Session()\n\n    def run(self: UKBiobankStep) -> None:\n\"\"\"Run UKBiobank study table ingestion step.\"\"\"\n        # Read in the UKBiobank manifest tsv file.\n        df = self.session.spark.read.csv(\n            self.ukbiobank_manifest, sep=\"\\t\", header=True, inferSchema=True\n        )\n\n        # Parse the study index data.\n        ukbiobank_study_index = StudyIndexUKBiobank.from_source(df)\n\n        # Write the output.\n        ukbiobank_study_index.df.write.mode(self.session.write_mode).parquet(\n            self.ukbiobank_study_index_out\n        )\n

UKBiobank study table ingestion step requirements.

Attributes:

  • ukbiobank_manifest (str): UKBiobank manifest of studies.
  • ukbiobank_study_index_out (str): Output path for the UKBiobank study index dataset.

Source code in src/otg/config.py
@dataclass\nclass UKBiobankStepConfig:\n\"\"\"UKBiobank study table ingestion step requirements.\n\n    Attributes:\n        ukbiobank_manifest (str): UKBiobank manifest of studies.\n        ukbiobank_study_index_out (str): Output path for the UKBiobank study index dataset.\n    \"\"\"\n\n    _target_: str = \"otg.ukbiobank.UKBiobankStep\"\n    ukbiobank_manifest: str = MISSING\n    ukbiobank_study_index_out: str = MISSING\n
"},{"location":"components/step/ukbiobank/#otg.ukbiobank.UKBiobankStep.run","title":"run()","text":"

Run UKBiobank study table ingestion step.

Source code in src/otg/ukbiobank.py
def run(self: UKBiobankStep) -> None:\n\"\"\"Run UKBiobank study table ingestion step.\"\"\"\n    # Read in the UKBiobank manifest tsv file.\n    df = self.session.spark.read.csv(\n        self.ukbiobank_manifest, sep=\"\\t\", header=True, inferSchema=True\n    )\n\n    # Parse the study index data.\n    ukbiobank_study_index = StudyIndexUKBiobank.from_source(df)\n\n    # Write the output.\n    ukbiobank_study_index.df.write.mode(self.session.write_mode).parquet(\n        self.ukbiobank_study_index_out\n    )\n
"},{"location":"components/step/variant_annotation_step/","title":"Variant annotation","text":"

Bases: VariantAnnotationStepConfig

Variant annotation step.

Variant annotation step produces a dataset of the type VariantAnnotation derived from gnomAD's gnomad.genomes.vX.X.X.sites.ht Hail table. This dataset is used to validate variants and as a source of annotation.

Source code in src/otg/variant_annotation.py
@dataclass\nclass VariantAnnotationStep(VariantAnnotationStepConfig):\n\"\"\"Variant annotation step.\n\n    Variant annotation step produces a dataset of the type `VariantAnnotation` derived from gnomAD's `gnomad.genomes.vX.X.X.sites.ht` Hail table. This dataset is used to validate variants and as a source of annotation.\n    \"\"\"\n\n    session: Session = Session()\n\n    def run(self: VariantAnnotationStep) -> None:\n\"\"\"Run variant annotation step.\"\"\"\n        # init hail session\n        hl.init(sc=self.session.spark.sparkContext, log=\"/dev/null\")\n\n        variant_annotation = VariantAnnotation.from_gnomad(\n            self.gnomad_genomes,\n            self.chain_38_to_37,\n            self.populations,\n        )\n        # Writing data partitioned by chromosome and position:\n        (\n            variant_annotation.df.repartition(400, \"chromosome\")\n            .sortWithinPartitions(\"chromosome\", \"position\")\n            .write.partitionBy(\"chromosome\")\n            .mode(self.session.write_mode)\n            .parquet(self.variant_annotation_path)\n        )\n

Variant annotation step requirements.

Attributes:

  • gnomad_genomes (str): Path to gnomAD genomes hail table.
  • chain_38_to_37 (str): Path to GRCh38 to GRCh37 chain file.
  • variant_annotation_path (str): Output variant annotation path.
  • populations (List[str]): List of populations to include.

Source code in src/otg/config.py
@dataclass\nclass VariantAnnotationStepConfig:\n\"\"\"Variant annotation step requirements.\n\n    Attributes:\n        gnomad_genomes (str): Path to gnomAD genomes hail table.\n        chain_38_to_37 (str): Path to GRCh38 to GRCh37 chain file.\n        variant_annotation_path (str): Output variant annotation path.\n        populations (List[str]): List of populations to include.\n    \"\"\"\n\n    _target_: str = \"otg.variant_annotation.VariantAnnotationStep\"\n    gnomad_genomes: str = MISSING\n    chain_38_to_37: str = MISSING\n    variant_annotation_path: str = MISSING\n    populations: List[str] = field(\n        default_factory=lambda: [\n            \"afr\",  # African-American\n            \"amr\",  # American Admixed/Latino\n            \"ami\",  # Amish ancestry\n            \"asj\",  # Ashkenazi Jewish\n            \"eas\",  # East Asian\n            \"fin\",  # Finnish\n            \"nfe\",  # Non-Finnish European\n            \"mid\",  # Middle Eastern\n            \"sas\",  # South Asian\n            \"oth\",  # Other\n        ]\n    )\n
"},{"location":"components/step/variant_annotation_step/#otg.variant_annotation.VariantAnnotationStep.run","title":"run()","text":"

Run variant annotation step.

Source code in src/otg/variant_annotation.py
def run(self: VariantAnnotationStep) -> None:\n\"\"\"Run variant annotation step.\"\"\"\n    # init hail session\n    hl.init(sc=self.session.spark.sparkContext, log=\"/dev/null\")\n\n    variant_annotation = VariantAnnotation.from_gnomad(\n        self.gnomad_genomes,\n        self.chain_38_to_37,\n        self.populations,\n    )\n    # Writing data partitioned by chromosome and position:\n    (\n        variant_annotation.df.repartition(400, \"chromosome\")\n        .sortWithinPartitions(\"chromosome\", \"position\")\n        .write.partitionBy(\"chromosome\")\n        .mode(self.session.write_mode)\n        .parquet(self.variant_annotation_path)\n    )\n
"},{"location":"components/step/variant_index_step/","title":"Variant index","text":"

Bases: VariantIndexStepConfig

Variant index step.

Using a VariantAnnotation dataset as a reference, this step creates and writes a dataset of the type VariantIndex that includes only variants that have disease-association data, with a reduced set of annotations.

Source code in src/otg/variant_index.py
@dataclass\nclass VariantIndexStep(VariantIndexStepConfig):\n\"\"\"Variant index step.\n\n    Using a `VariantAnnotation` dataset as a reference, this step creates and writes a dataset of the type `VariantIndex` that includes only variants that have disease-association data, with a reduced set of annotations.\n    \"\"\"\n\n    session: Session = Session()\n\n    def run(self: VariantIndexStep) -> None:\n\"\"\"Run variant index step.\"\"\"\n        # Variant annotation dataset\n        va = VariantAnnotation.from_parquet(self.session, self.variant_annotation_path)\n\n        # Study-locus dataset\n        study_locus = StudyLocus.from_parquet(self.session, self.study_locus_path)\n\n        # Reduce scope of variant annotation dataset to only variants in study-locus sets:\n        va_slimmed = va.filter_by_variant_df(\n            study_locus.unique_lead_tag_variants(), [\"id\", \"chromosome\"]\n        )\n\n        # Generate variant index using a subset of the variant annotation dataset\n        vi = VariantIndex.from_variant_annotation(va_slimmed)\n\n        # Write data:\n        # self.etl.logger.info(\n        #     f\"Writing invalid variants from the credible set to: {self.variant_invalid}\"\n        # )\n        # vi.invalid_variants.write.mode(self.etl.write_mode).parquet(\n        #     self.variant_invalid\n        # )\n\n        self.session.logger.info(f\"Writing variant index to: {self.variant_index_path}\")\n        (\n            vi.df.write.partitionBy(\"chromosome\")\n            .mode(self.session.write_mode)\n            .parquet(self.variant_index_path)\n        )\n

Variant index step requirements.

Attributes:

  • variant_annotation_path (str): Input variant annotation path.
  • study_locus_path (str): Input study-locus path.
  • variant_index_path (str): Output variant index path.

Source code in src/otg/config.py
@dataclass\nclass VariantIndexStepConfig:\n\"\"\"Variant index step requirements.\n\n    Attributes:\n        variant_annotation_path (str): Input variant annotation path.\n        study_locus_path (str): Input study-locus path.\n        variant_index_path (str): Output variant index path.\n    \"\"\"\n\n    _target_: str = \"otg.variant_index.VariantIndexStep\"\n    variant_annotation_path: str = MISSING\n    study_locus_path: str = MISSING\n    variant_index_path: str = MISSING\n
"},{"location":"components/step/variant_index_step/#otg.variant_index.VariantIndexStep.run","title":"run()","text":"

Run variant index step.

Source code in src/otg/variant_index.py
def run(self: VariantIndexStep) -> None:\n\"\"\"Run variant index step.\"\"\"\n    # Variant annotation dataset\n    va = VariantAnnotation.from_parquet(self.session, self.variant_annotation_path)\n\n    # Study-locus dataset\n    study_locus = StudyLocus.from_parquet(self.session, self.study_locus_path)\n\n    # Reduce scope of variant annotation dataset to only variants in study-locus sets:\n    va_slimmed = va.filter_by_variant_df(\n        study_locus.unique_lead_tag_variants(), [\"id\", \"chromosome\"]\n    )\n\n    # Generate variant index using a subset of the variant annotation dataset\n    vi = VariantIndex.from_variant_annotation(va_slimmed)\n\n    # Write data:\n    # self.etl.logger.info(\n    #     f\"Writing invalid variants from the credible set to: {self.variant_invalid}\"\n    # )\n    # vi.invalid_variants.write.mode(self.etl.write_mode).parquet(\n    #     self.variant_invalid\n    # )\n\n    self.session.logger.info(f\"Writing variant index to: {self.variant_index_path}\")\n    (\n        vi.df.write.partitionBy(\"chromosome\")\n        .mode(self.session.write_mode)\n        .parquet(self.variant_index_path)\n    )\n
"},{"location":"components/step/variant_to_gene_step/","title":"V2G","text":"

Bases: V2GStepConfig

Variant-to-gene (V2G) step.

This step aims to generate a dataset that contains multiple pieces of evidence supporting the functional association of specific variants with genes. Some of the evidence types include:

  1. Chromatin interaction experiments, e.g. Promoter Capture Hi-C (PCHi-C).
  2. In silico functional predictions, e.g. Variant Effect Predictor (VEP) from Ensembl.
  3. Distance between the variant and each gene's canonical transcription start site (TSS).
Source code in src/otg/v2g.py
@dataclass\nclass V2GStep(V2GStepConfig):\n\"\"\"Variant-to-gene (V2G) step.\n\n    This step aims to generate a dataset that contains multiple pieces of evidence supporting the functional association of specific variants with genes. Some of the evidence types include:\n\n    1. Chromatin interaction experiments, e.g. Promoter Capture Hi-C (PCHi-C).\n    2. In silico functional predictions, e.g. Variant Effect Predictor (VEP) from Ensembl.\n    3. Distance between the variant and each gene's canonical transcription start site (TSS).\n\n    \"\"\"\n\n    session: Session = Session()\n\n    def run(self: V2GStep) -> None:\n\"\"\"Run V2G dataset generation.\"\"\"\n        # Filter gene index by approved biotypes to define V2G gene universe\n        gene_index_filtered = GeneIndex.from_parquet(\n            self.session, self.gene_index_path\n        ).filter_by_biotypes(self.approved_biotypes)\n\n        vi = VariantIndex.from_parquet(self.session, self.variant_index_path).persist()\n        va = VariantAnnotation.from_parquet(self.session, self.variant_annotation_path)\n        vep_consequences = self.session.spark.read.csv(\n            self.vep_consequences_path, sep=\"\\t\", header=True\n        )\n\n        # Variant annotation reduced to the variant index to define V2G variant universe\n        va_slimmed = va.filter_by_variant_df(vi.df, [\"id\", \"chromosome\"]).persist()\n\n        # lift over variants to hg38\n        lift = LiftOverSpark(\n            self.liftover_chain_file_path, self.liftover_max_length_difference\n        )\n\n        v2g_datasets = [\n            va_slimmed.get_distance_to_tss(gene_index_filtered, self.max_distance),\n            # variant effects\n            va_slimmed.get_most_severe_vep_v2g(vep_consequences, gene_index_filtered),\n            va_slimmed.get_polyphen_v2g(gene_index_filtered),\n            va_slimmed.get_sift_v2g(gene_index_filtered),\n            va_slimmed.get_plof_v2g(gene_index_filtered),\n            # intervals\n            Intervals.parse_andersson(\n                self.session, self.anderson_path, gene_index_filtered, lift\n            ).v2g(vi),\n            Intervals.parse_javierre(\n                self.session, self.javierre_path, gene_index_filtered, lift\n            ).v2g(vi),\n            Intervals.parse_jung(\n                self.session, self.jung_path, gene_index_filtered, lift\n            ).v2g(vi),\n            Intervals.parse_thurman(\n                self.session, self.thurnman_path, gene_index_filtered, lift\n            ).v2g(vi),\n        ]\n\n        # merge all V2G datasets\n        v2g = V2G(\n            _df=reduce(\n                lambda x, y: x.unionByName(y, allowMissingColumns=True),\n                [dataset.df for dataset in v2g_datasets],\n            ).repartition(\"chromosome\")\n        )\n        # write V2G dataset\n        (\n            v2g.df.write.partitionBy(\"chromosome\")\n            .mode(self.session.write_mode)\n            .parquet(self.v2g_path)\n        )\n

Variant to gene (V2G) step requirements.

Attributes:

  • variant_index_path (str): Input variant index path.
  • variant_annotation_path (str): Input variant annotation path.
  • gene_index_path (str): Input gene index path.
  • vep_consequences_path (str): Input VEP consequences path.
  • lift_over_chain_file_path (str): Path to GRCh37 to GRCh38 chain file.
  • approved_biotypes (list[str]): List of approved biotypes.
  • anderson_path (str): Anderson intervals path.
  • javierre_path (str): Javierre intervals path.
  • jung_path (str): Jung intervals path.
  • thurnman_path (str): Thurnman intervals path.
  • liftover_max_length_difference (int): Maximum length difference for liftover.
  • max_distance (int): Maximum distance to consider.
  • output_path (str): Output V2G path.

Source code in src/otg/config.py
@dataclass\nclass V2GStepConfig:\n\"\"\"Variant to gene (V2G) step requirements.\n\n    Attributes:\n        variant_index_path (str): Input variant index path.\n        variant_annotation_path (str): Input variant annotation path.\n        gene_index_path (str): Input gene index path.\n        vep_consequences_path (str): Input VEP consequences path.\n        lift_over_chain_file_path (str): Path to GRCh37 to GRCh38 chain file.\n        approved_biotypes (list[str]): List of approved biotypes.\n        anderson_path (str): Anderson intervals path.\n        javierre_path (str): Javierre intervals path.\n        jung_path (str): Jung intervals path.\n        thurnman_path (str): Thurnman intervals path.\n        liftover_max_length_difference (int): Maximum length difference for liftover.\n        max_distance (int): Maximum distance to consider.\n        output_path (str): Output V2G path.\n    \"\"\"\n\n    _target_: str = \"otg.v2g.V2GStep\"\n    variant_index_path: str = MISSING\n    variant_annotation_path: str = MISSING\n    gene_index_path: str = MISSING\n    vep_consequences_path: str = MISSING\n    liftover_chain_file_path: str = MISSING\n    anderson_path: str = MISSING\n    javierre_path: str = MISSING\n    jung_path: str = MISSING\n    thurnman_path: str = MISSING\n    liftover_max_length_difference: int = 100\n    max_distance: int = 500_000\n    v2g_path: str = MISSING\n    approved_biotypes: List[str] = field(\n        default_factory=lambda: [\n            \"protein_coding\",\n            \"3prime_overlapping_ncRNA\",\n            \"antisense\",\n            \"bidirectional_promoter_lncRNA\",\n            \"IG_C_gene\",\n            \"IG_D_gene\",\n            \"IG_J_gene\",\n            \"IG_V_gene\",\n            \"lincRNA\",\n            \"macro_lncRNA\",\n            \"non_coding\",\n            \"sense_intronic\",\n            \"sense_overlapping\",\n        ]\n    )\n
"},{"location":"components/step/variant_to_gene_step/#otg.v2g.V2GStep.run","title":"run()","text":"

Run V2G dataset generation.

Source code in src/otg/v2g.py
def run(self: V2GStep) -> None:\n\"\"\"Run V2G dataset generation.\"\"\"\n    # Filter gene index by approved biotypes to define V2G gene universe\n    gene_index_filtered = GeneIndex.from_parquet(\n        self.session, self.gene_index_path\n    ).filter_by_biotypes(self.approved_biotypes)\n\n    vi = VariantIndex.from_parquet(self.session, self.variant_index_path).persist()\n    va = VariantAnnotation.from_parquet(self.session, self.variant_annotation_path)\n    vep_consequences = self.session.spark.read.csv(\n        self.vep_consequences_path, sep=\"\\t\", header=True\n    )\n\n    # Variant annotation reduced to the variant index to define V2G variant universe\n    va_slimmed = va.filter_by_variant_df(vi.df, [\"id\", \"chromosome\"]).persist()\n\n    # lift over variants to hg38\n    lift = LiftOverSpark(\n        self.liftover_chain_file_path, self.liftover_max_length_difference\n    )\n\n    v2g_datasets = [\n        va_slimmed.get_distance_to_tss(gene_index_filtered, self.max_distance),\n        # variant effects\n        va_slimmed.get_most_severe_vep_v2g(vep_consequences, gene_index_filtered),\n        va_slimmed.get_polyphen_v2g(gene_index_filtered),\n        va_slimmed.get_sift_v2g(gene_index_filtered),\n        va_slimmed.get_plof_v2g(gene_index_filtered),\n        # intervals\n        Intervals.parse_andersson(\n            self.session, self.anderson_path, gene_index_filtered, lift\n        ).v2g(vi),\n        Intervals.parse_javierre(\n            self.session, self.javierre_path, gene_index_filtered, lift\n        ).v2g(vi),\n        Intervals.parse_jung(\n            self.session, self.jung_path, gene_index_filtered, lift\n        ).v2g(vi),\n        Intervals.parse_thurman(\n            self.session, self.thurnman_path, gene_index_filtered, lift\n        ).v2g(vi),\n    ]\n\n    # merge all V2G datasets\n    v2g = V2G(\n        _df=reduce(\n            lambda x, y: x.unionByName(y, allowMissingColumns=True),\n            [dataset.df for dataset in v2g_datasets],\n        ).repartition(\"chromosome\")\n    )\n    # write V2G dataset\n    (\n        v2g.df.write.partitionBy(\"chromosome\")\n        .mode(self.session.write_mode)\n        .parquet(self.v2g_path)\n    )\n
"}]} \ No newline at end of file +{"config":{"lang":["en"],"separator":"[\\s\\-]+","pipeline":["stopWordFilter"]},"docs":[{"location":"","title":"Home","text":"

Ingestion and analysis of genetic and functional genomic data for the identification and prioritisation of drug targets.

This project is still in an experimental phase. Please refer to the roadmap section for more information.

For information on how to configure the development environment, run the code, or contribute changes, see the contributing section. For known technical issues and their solutions, see the troubleshooting section.

"},{"location":"contributing/","title":"Environment configuration and contributing changes","text":""},{"location":"contributing/#one-time-configuration","title":"One-time configuration","text":"

The steps in this section only ever need to be done once on any particular system.

Google Cloud configuration:

  1. Install Google Cloud SDK: https://cloud.google.com/sdk/docs/install.
  2. Log in to your work Google Account: run gcloud auth login and follow instructions.
  3. Obtain Google application credentials: run gcloud auth application-default login and follow instructions.

Check that you have the make utility installed, and if not (which is unlikely), install it using your system package manager.

Check that you have Java installed.

"},{"location":"contributing/#environment-configuration","title":"Environment configuration","text":"

Run make setup-dev to install/update the necessary packages and activate the development environment. You need to do this every time you open a new shell.

It is recommended to use VS Code as an IDE for development.

"},{"location":"contributing/#how-to-run-the-code","title":"How to run the code","text":"

All pipelines in this repository are intended to be run in Google Dataproc. Running them locally is not currently supported.

In order to run the code:

  1. Manually edit your local workflow/dag.yaml file and comment out the steps you do not want to run.

  2. Manually edit your local pyproject.toml file and modify the version of the code.

    • This must be different from the version used by any other people working on the repository to avoid any deployment conflicts, so it's a good idea to use your name, for example: 1.2.3+jdoe.
    • You can also add a brief branch description, for example: 1.2.3+jdoe.myfeature.
    • Note that the version must comply with PEP 440 conventions; otherwise Poetry will not allow it to be deployed.
    • Do not use underscores or hyphens in your version name. When building the WHL file, they will be automatically converted to dots, which means the file name will no longer match the version and the build will fail. Use dots instead (a quick way to check a candidate version is sketched after this list).
  3. Run make build.

    • This will create a bundle containing the necessary code, configuration and dependencies to run the ETL pipeline, and then upload this bundle to Google Cloud.
    • A version-specific subpath is used, so uploading the code will not affect any branches but your own.
    • If there was already a code bundle uploaded with the same version number, it will be replaced.
  4. Submit the Dataproc job with poetry run python workflow/workflow_template.py

    • You will need to specify additional parameters; some are mandatory and some are optional. Run with --help to see usage.
    • The script will provision the cluster and submit the job.
    • The cluster will take a few minutes to get provisioned and running, during which the script will not output anything; this is normal.
    • Once submitted, you can monitor the progress of your job on this page: https://console.cloud.google.com/dataproc/jobs?project=open-targets-genetics-dev.
    • On completion (whether successful or failed), the cluster will be automatically removed, so you don't have to worry about shutting it down to avoid incurring charges.
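On the PEP 440 point in step 2 above: a quick, optional way to sanity-check a candidate version string before editing pyproject.toml is the packaging library (assumed to be available in the development environment); it also shows how underscores and hyphens in the local segment are normalised to dots.

from packaging.version import InvalidVersion, Version

for candidate in ("1.2.3+jdoe.myfeature", "1.2.3+jdoe_myfeature", "1.2.3 jdoe"):
    try:
        # str(Version(...)) is the normalised form; '_' and '-' in the local segment become '.'
        print(candidate, "->", Version(candidate))
    except InvalidVersion:
        print(candidate, "-> not PEP 440 compliant")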
"},{"location":"contributing/#how-to-generate-a-local-copy-of-the-documentation","title":"How to generate a local copy of the documentation","text":"

Run poetry run mkdocs serve. This will generate a local copy of the documentation and start a local server to browse it (the URL will be printed, usually http://127.0.0.1:8000/).

"},{"location":"contributing/#how-to-run-the-tests","title":"How to run the tests","text":"

Run poetry run pytest.

"},{"location":"contributing/#contributing-checklist","title":"Contributing checklist","text":"

When making changes, and especially when implementing a new module or feature, it's essential to ensure that all relevant sections of the code base are modified.

"},{"location":"contributing/#documentation","title":"Documentation","text":"
  • If during development you had a question which wasn't covered in the documentation, and someone explained it to you, add it to the documentation. The same applies if you encountered any instructions in the documentation which were obsolete or incorrect.
  • Documentation autogeneration expressions start with :::. They will automatically generate sections of the documentation based on class and method docstrings. Be sure to update them for:
  • Dataset definitions in docs/reference/dataset (example: docs/reference/dataset/study_index/study_index_finngen.md)
  • Step definitions in docs/reference/step (example: docs/reference/step/finngen.md)
"},{"location":"contributing/#configuration","title":"Configuration","text":"
  • Input and output paths in config/datasets/gcp.yaml
  • Step configuration in config/step/my_STEP.yaml (example: config/step/my_finngen.yaml)
"},{"location":"contributing/#classes","title":"Classes","text":"
  • Step configuration class in src/otg/config.py (example: FinnGenStepConfig class in that module)
  • Dataset class in src/otg/dataset/ (example: src/otg/dataset/study_index.py → StudyIndexFinnGen)
  • Step main running class in src/otg/STEP.py (example: src/otg/finngen.py)
"},{"location":"contributing/#tests","title":"Tests","text":"
  • Test study fixture in tests/conftest.py (example: mock_study_index_finngen in that module)
  • Test sample data in tests/data_samples (example: tests/data_samples/finngen_studies_sample.json)
  • Test definition in tests/ (example: tests/dataset/test_study_index.py → test_study_index_finngen_creation)
"},{"location":"roadmap/","title":"Roadmap","text":"

The Open Targets core team is working on refactoring Open Targets Genetics, aiming to:

  • Re-focus the product around Target ID
  • Create a gold standard toolkit for post-GWAS analysis
  • Enable faster and more robust addition of new datasets and datatypes
  • Reduce computational and financial cost

See here for a list of open issues for this project.

Schematic diagram representing the drafted process:

"},{"location":"troubleshooting/","title":"Troubleshooting","text":""},{"location":"troubleshooting/#blaslapack","title":"BLAS/LAPACK","text":"

If you see errors related to BLAS/LAPACK libraries, see this StackOverflow post for guidance.

"},{"location":"troubleshooting/#pyenv-and-poetry","title":"Pyenv and Poetry","text":"

If you see various errors thrown by Pyenv or Poetry, they can be hard to diagnose and resolve precisely. In this case, it often helps to remove those tools from the system completely. Follow these steps:

  1. Close your currently activated environment, if any: exit
  2. Uninstall Poetry: curl -sSL https://install.python-poetry.org | python3 - --uninstall
  3. Clear Poetry cache: rm -rf ~/.cache/pypoetry
  4. Clear pre-commit cache: rm -rf ~/.cache/pre-commit
  5. Switch to system Python shell: pyenv shell system
  6. Edit ~/.bashrc to remove the lines related to Pyenv configuration
  7. Remove Pyenv configuration and cache: rm -rf ~/.pyenv

After that, open a fresh shell session and run make setup-dev again.

"},{"location":"troubleshooting/#java","title":"Java","text":"

Officially, PySpark requires Java version 8 (a.k.a. 1.8) or above to work. However, if you have a very recent version of Java, you may experience issues, as it may introduce breaking changes that PySpark hasn't had time to integrate. For example, as of May 2023, PySpark did not work with Java 20.

If you are encountering problems with initialising a Spark session, try using Java 11.

"},{"location":"troubleshooting/#pre-commit","title":"Pre-commit","text":"

If you see an error message thrown by pre-commit, which looks like this (SyntaxError: Unexpected token '?'), followed by a JavaScript traceback, the issue is likely with your system NodeJS version.

One solution which can help in this case is to upgrade your system NodeJS version. However, this may not always be possible. For example, as of July 2023 the Ubuntu repository is several major versions behind the latest release.

Another solution which helps is to remove Node, NodeJS, and npm from your system entirely. In this case, pre-commit will not try to rely on a system version of NodeJS and will install its own, suitable one.

On Ubuntu, this can be done using sudo apt remove node nodejs npm, followed by sudo apt autoremove. But in some cases, depending on your existing installation, you may need to also manually remove some files. See this StackOverflow answer for guidance.

After running these commands, you are advised to open a fresh shell, and then also reinstall Pyenv and Poetry to make sure they pick up the changes (see the relevant section above).

"},{"location":"components/dataset/_dataset/","title":"Dataset","text":"

Open Targets Genetics Dataset.

Dataset is a wrapper around a Spark DataFrame with a predefined schema. Schemas for each child dataset are described in the json.schemas module.

Source code in src/otg/dataset/dataset.py
@dataclass\nclass Dataset:\n\"\"\"Open Targets Genetics Dataset.\n\n    `Dataset` is a wrapper around a Spark DataFrame with a predefined schema. Schemas for each child dataset are described in the `json.schemas` module.\n    \"\"\"\n\n    _df: DataFrame\n    _schema: StructType\n\n    def __post_init__(self: Dataset) -> None:\n\"\"\"Post init.\"\"\"\n        self.validate_schema()\n\n    @property\n    def df(self: Dataset) -> DataFrame:\n\"\"\"Dataframe included in the Dataset.\"\"\"\n        return self._df\n\n    @df.setter\n    def df(self: Dataset, new_df: DataFrame) -> None:  # noqa: CCE001\n\"\"\"Dataframe setter.\"\"\"\n        self._df = new_df\n        self.validate_schema()\n\n    @property\n    def schema(self: Dataset) -> StructType:\n\"\"\"Dataframe expected schema.\"\"\"\n        return self._schema\n\n    @classmethod\n    def from_parquet(\n        cls: type[Dataset], session: Session, path: str, schema: StructType\n    ) -> Dataset:\n\"\"\"Reads a parquet file into a Dataset with a given schema.\n\n        Args:\n            session (Session): ETL session\n            path (str): Path to parquet file\n            schema (StructType): Schema to use\n\n        Returns:\n            Dataset: Dataset with given schema\n        \"\"\"\n        df = session.read_parquet(path=path, schema=schema)\n        return cls(_df=df, _schema=schema)\n\n    def validate_schema(self: Dataset) -> None:  # sourcery skip: invert-any-all\n\"\"\"Validate DataFrame schema against expected class schema.\n\n        Raises:\n            ValueError: DataFrame schema is not valid\n        \"\"\"\n        expected_schema = self._schema\n        expected_fields = flatten_schema(expected_schema)\n        observed_schema = self._df.schema\n        observed_fields = flatten_schema(observed_schema)\n\n        # Unexpected fields in dataset\n        if unexpected_struct_fields := [\n            x for x in observed_fields if x not in expected_fields\n        ]:\n            raise ValueError(\n                f\"The {unexpected_struct_fields} fields are not included in DataFrame schema: {expected_fields}\"\n            )\n\n        # Required fields not in dataset\n        required_fields = [x.name for x in expected_schema if not x.nullable]\n        if missing_required_fields := [\n            req\n            for req in required_fields\n            if not any(field.name == req for field in observed_fields)\n        ]:\n            raise ValueError(\n                f\"The {missing_required_fields} fields are required but missing: {required_fields}\"\n            )\n\n        # Fields with duplicated names\n        if duplicated_fields := [\n            x for x in set(observed_fields) if observed_fields.count(x) > 1\n        ]:\n            raise ValueError(\n                f\"The following fields are duplicated in DataFrame schema: {duplicated_fields}\"\n            )\n\n        # Fields with different datatype\n        if fields_with_different_observed_datatype := [\n            field\n            for field in set(observed_fields)\n            if observed_fields.count(field) != expected_fields.count(field)\n        ]:\n            raise ValueError(\n                f\"The following fields present differences in their datatypes: {fields_with_different_observed_datatype}.\"\n            )\n
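Every concrete dataset in the pipeline follows this pattern: subclass Dataset, supply the expected schema, and let __post_init__ validate any DataFrame assigned to it. Below is a minimal, hypothetical sketch; the class, its one-column schema and the spark session are invented for illustration, and real datasets load their schema from a JSON file via parse_spark_schema instead of declaring it inline.

from dataclasses import dataclass

from pyspark.sql.types import StringType, StructField, StructType

from otg.dataset.dataset import Dataset  # import path follows the source location shown above


@dataclass
class MyDataset(Dataset):
    """Hypothetical child dataset, defined only to illustrate the pattern."""

    # Real datasets do: _schema = parse_spark_schema("my_dataset.json")
    _schema: StructType = StructType([StructField("geneId", StringType(), False)])


# `spark` is assumed to be an active SparkSession:
df = spark.createDataFrame([("ENSG00000157764",)], schema=MyDataset._schema)
my_dataset = MyDataset(_df=df, _schema=MyDataset._schema)  # validate_schema runs in __post_init__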
"},{"location":"components/dataset/_dataset/#otg.dataset.dataset.Dataset.df","title":"df: DataFrame property writable","text":"

Dataframe included in the Dataset.

"},{"location":"components/dataset/_dataset/#otg.dataset.dataset.Dataset.schema","title":"schema: StructType property","text":"

Dataframe expected schema.

"},{"location":"components/dataset/_dataset/#otg.dataset.dataset.Dataset.__post_init__","title":"__post_init__()","text":"

Post init.

Source code in src/otg/dataset/dataset.py
def __post_init__(self: Dataset) -> None:\n\"\"\"Post init.\"\"\"\n    self.validate_schema()\n
"},{"location":"components/dataset/_dataset/#otg.dataset.dataset.Dataset.from_parquet","title":"from_parquet(session, path, schema) classmethod","text":"

Reads a parquet file into a Dataset with a given schema.

Parameters:

  • session (Session, required): ETL session
  • path (str, required): Path to parquet file
  • schema (StructType, required): Schema to use

Returns:

  • Dataset: Dataset with given schema

Source code in src/otg/dataset/dataset.py
@classmethod\ndef from_parquet(\n    cls: type[Dataset], session: Session, path: str, schema: StructType\n) -> Dataset:\n\"\"\"Reads a parquet file into a Dataset with a given schema.\n\n    Args:\n        session (Session): ETL session\n        path (str): Path to parquet file\n        schema (StructType): Schema to use\n\n    Returns:\n        Dataset: Dataset with given schema\n    \"\"\"\n    df = session.read_parquet(path=path, schema=schema)\n    return cls(_df=df, _schema=schema)\n
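As a usage sketch (the parquet path is a placeholder and session is assumed to be an already-configured Session object), reading a file while enforcing an explicit schema could look like this:

from pyspark.sql.types import StringType, StructField, StructType

schema = StructType([StructField("geneId", StringType(), False)])
dataset = Dataset.from_parquet(session, "gs://my-bucket/example.parquet", schema)  # placeholder path
assert dataset.schema == schema  # the expected schema is kept on the returned Dataset

In practice the child classes (for example Colocalisation or GeneIndex below) wrap this call and supply their own schema, so only the session and path are needed.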
"},{"location":"components/dataset/_dataset/#otg.dataset.dataset.Dataset.validate_schema","title":"validate_schema()","text":"

Validate DataFrame schema against expected class schema.

Raises:

  • ValueError: DataFrame schema is not valid

Source code in src/otg/dataset/dataset.py
def validate_schema(self: Dataset) -> None:  # sourcery skip: invert-any-all\n\"\"\"Validate DataFrame schema against expected class schema.\n\n    Raises:\n        ValueError: DataFrame schema is not valid\n    \"\"\"\n    expected_schema = self._schema\n    expected_fields = flatten_schema(expected_schema)\n    observed_schema = self._df.schema\n    observed_fields = flatten_schema(observed_schema)\n\n    # Unexpected fields in dataset\n    if unexpected_struct_fields := [\n        x for x in observed_fields if x not in expected_fields\n    ]:\n        raise ValueError(\n            f\"The {unexpected_struct_fields} fields are not included in DataFrame schema: {expected_fields}\"\n        )\n\n    # Required fields not in dataset\n    required_fields = [x.name for x in expected_schema if not x.nullable]\n    if missing_required_fields := [\n        req\n        for req in required_fields\n        if not any(field.name == req for field in observed_fields)\n    ]:\n        raise ValueError(\n            f\"The {missing_required_fields} fields are required but missing: {required_fields}\"\n        )\n\n    # Fields with duplicated names\n    if duplicated_fields := [\n        x for x in set(observed_fields) if observed_fields.count(x) > 1\n    ]:\n        raise ValueError(\n            f\"The following fields are duplicated in DataFrame schema: {duplicated_fields}\"\n        )\n\n    # Fields with different datatype\n    if fields_with_different_observed_datatype := [\n        field\n        for field in set(observed_fields)\n        if observed_fields.count(field) != expected_fields.count(field)\n    ]:\n        raise ValueError(\n            f\"The following fields present differences in their datatypes: {fields_with_different_observed_datatype}.\"\n        )\n
"},{"location":"components/dataset/colocalisation/","title":"Colocalisation","text":"

Bases: Dataset

Colocalisation results for pairs of overlapping study-locus.

Source code in src/otg/dataset/colocalisation.py
@dataclass\nclass Colocalisation(Dataset):\n\"\"\"Colocalisation results for pairs of overlapping study-locus.\"\"\"\n\n    _schema: StructType = parse_spark_schema(\"colocalisation.json\")\n\n    @classmethod\n    def from_parquet(\n        cls: type[Colocalisation], session: Session, path: str\n    ) -> Colocalisation:\n\"\"\"Initialise Colocalisation dataset from parquet file.\n\n        Args:\n            session (Session): ETL session\n            path (str): Path to parquet file\n\n        Returns:\n            Colocalisation: Colocalisation results\n        \"\"\"\n        df = session.read_parquet(path=path, schema=cls._schema)\n        return cls(_df=df, _schema=cls._schema)\n
"},{"location":"components/dataset/colocalisation/#otg.dataset.colocalisation.Colocalisation.from_parquet","title":"from_parquet(session, path) classmethod","text":"

Initialise Colocalisation dataset from parquet file.

Parameters:

  • session (Session, required): ETL session
  • path (str, required): Path to parquet file

Returns:

  • Colocalisation: Colocalisation results

Source code in src/otg/dataset/colocalisation.py
@classmethod\ndef from_parquet(\n    cls: type[Colocalisation], session: Session, path: str\n) -> Colocalisation:\n\"\"\"Initialise Colocalisation dataset from parquet file.\n\n    Args:\n        session (Session): ETL session\n        path (str): Path to parquet file\n\n    Returns:\n        Colocalisation: Colocalisation results\n    \"\"\"\n    df = session.read_parquet(path=path, schema=cls._schema)\n    return cls(_df=df, _schema=cls._schema)\n
"},{"location":"components/dataset/colocalisation/#schema","title":"Schema","text":"
root\n |-- left_studyLocusId: long (nullable = false)\n |-- right_studyLocusId: long (nullable = false)\n |-- chromosome: string (nullable = false)\n |-- colocalisationMethod: string (nullable = false)\n |-- coloc_n_vars: long (nullable = false)\n |-- coloc_h0: double (nullable = true)\n |-- coloc_h1: double (nullable = true)\n |-- coloc_h2: double (nullable = true)\n |-- coloc_h3: double (nullable = true)\n |-- coloc_h4: double (nullable = true)\n |-- coloc_log2_h4_h3: double (nullable = true)\n |-- clpp: double (nullable = true)\n
"},{"location":"components/dataset/gene_index/","title":"Gene index","text":"

Bases: Dataset

Gene index dataset.

Gene-based annotation.

Source code in src/otg/dataset/gene_index.py
@dataclass\nclass GeneIndex(Dataset):\n\"\"\"Gene index dataset.\n\n    Gene-based annotation.\n    \"\"\"\n\n    _schema: StructType = parse_spark_schema(\"targets.json\")\n\n    @staticmethod\n    def _get_gene_tss(strand_col: Column, start_col: Column, end_col: Column) -> Column:\n\"\"\"Returns the TSS of a gene based on its orientation.\n\n        Args:\n            strand_col (Column): Column containing 1 if the coding strand of the gene is forward, and -1 if it is reverse.\n            start_col (Column): Column containing the start position of the gene.\n            end_col (Column): Column containing the end position of the gene.\n\n        Returns:\n            Column: Column containing the TSS of the gene.\n\n        Examples:\n            >>> df = spark.createDataFrame([{\"strand\": 1, \"start\": 100, \"end\": 200}, {\"strand\": -1, \"start\": 100, \"end\": 200}])\n            >>> df.withColumn(\"tss\", GeneIndex._get_gene_tss(f.col(\"strand\"), f.col(\"start\"), f.col(\"end\"))).show()\n            +---+-----+------+---+\n            |end|start|strand|tss|\n            +---+-----+------+---+\n            |200|  100|     1|100|\n            |200|  100|    -1|200|\n            +---+-----+------+---+\n            <BLANKLINE>\n\n        \"\"\"\n        return f.when(strand_col == 1, start_col).when(strand_col == -1, end_col)\n\n    @classmethod\n    def from_source(cls: type[GeneIndex], target_index: DataFrame) -> GeneIndex:\n\"\"\"Initialise GeneIndex from source dataset.\n\n        Args:\n            target_index (DataFrame): Target index dataframe\n\n        Returns:\n            GeneIndex: Gene index dataset\n        \"\"\"\n        return cls(\n            _df=target_index.select(\n                f.coalesce(f.col(\"id\"), f.lit(\"unknown\")).alias(\"geneId\"),\n                f.coalesce(f.col(\"genomicLocation.chromosome\"), f.lit(\"unknown\")).alias(\n                    \"chromosome\"\n                ),\n                GeneIndex._get_gene_tss(\n                    f.col(\"genomicLocation.strand\"),\n                    f.col(\"genomicLocation.start\"),\n                    f.col(\"genomicLocation.end\"),\n                ).alias(\"tss\"),\n                \"biotype\",\n                \"approvedSymbol\",\n                \"obsoleteSymbols\",\n            )\n        )\n\n    @classmethod\n    def from_parquet(cls: type[GeneIndex], session: Session, path: str) -> GeneIndex:\n\"\"\"Initialise GeneIndex from parquet file.\n\n        Args:\n            session (Session): ETL session\n            path (str): Path to parquet file\n\n        Returns:\n            GeneIndex: Gene index dataset\n        \"\"\"\n        df = session.read_parquet(path=path, schema=cls._schema)\n        return cls(_df=df, _schema=cls._schema)\n\n    def filter_by_biotypes(self: GeneIndex, biotypes: list) -> GeneIndex:\n\"\"\"Filter by approved biotypes.\n\n        Args:\n            biotypes (list): List of Ensembl biotypes to keep.\n\n        Returns:\n            GeneIndex: Gene index dataset filtered by biotypes.\n        \"\"\"\n        self.df = self._df.filter(f.col(\"biotype\").isin(biotypes))\n        return self\n\n    def locations_lut(self: GeneIndex) -> DataFrame:\n\"\"\"Gene location information.\n\n        Returns:\n            DataFrame: Gene LUT including genomic location information.\n        \"\"\"\n        return self.df.select(\n            \"geneId\",\n            \"chromosome\",\n            \"tss\",\n        )\n\n    def symbols_lut(self: GeneIndex) -> DataFrame:\n\"\"\"Gene 
symbol lookup table.\n\n        Pre-processes gene/target dataset to create lookup table of gene symbols, including\n        obsoleted gene symbols.\n\n        Returns:\n            DataFrame: Gene LUT for symbol mapping containing `geneId` and `geneSymbol` columns.\n        \"\"\"\n        return self.df.select(\n            \"geneId\",\n            f.explode(\n                f.array_union(f.array(\"approvedSymbol\"), f.col(\"obsoleteSymbols.label\"))\n            ).alias(\"geneSymbol\"),\n        )\n
"},{"location":"components/dataset/gene_index/#otg.dataset.gene_index.GeneIndex.filter_by_biotypes","title":"filter_by_biotypes(biotypes)","text":"

Filter by approved biotypes.

Parameters:

  • biotypes (list, required): List of Ensembl biotypes to keep.

Returns:

  • GeneIndex: Gene index dataset filtered by biotypes.

Source code in src/otg/dataset/gene_index.py
def filter_by_biotypes(self: GeneIndex, biotypes: list) -> GeneIndex:\n\"\"\"Filter by approved biotypes.\n\n    Args:\n        biotypes (list): List of Ensembl biotypes to keep.\n\n    Returns:\n        GeneIndex: Gene index dataset filtered by biotypes.\n    \"\"\"\n    self.df = self._df.filter(f.col(\"biotype\").isin(biotypes))\n    return self\n
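For example, this is how the V2G step narrows the gene universe before building evidence; the parquet path below is a placeholder and session is assumed to be an existing Session object.

gene_index = GeneIndex.from_parquet(session, "gs://my-bucket/gene_index")  # placeholder path
protein_coding_only = gene_index.filter_by_biotypes(["protein_coding"])  # returns the same GeneIndex, now filtered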
"},{"location":"components/dataset/gene_index/#otg.dataset.gene_index.GeneIndex.from_parquet","title":"from_parquet(session, path) classmethod","text":"

Initialise GeneIndex from parquet file.

Parameters:

  • session (Session, required): ETL session
  • path (str, required): Path to parquet file

Returns:

  • GeneIndex: Gene index dataset

Source code in src/otg/dataset/gene_index.py
@classmethod\ndef from_parquet(cls: type[GeneIndex], session: Session, path: str) -> GeneIndex:\n\"\"\"Initialise GeneIndex from parquet file.\n\n    Args:\n        session (Session): ETL session\n        path (str): Path to parquet file\n\n    Returns:\n        GeneIndex: Gene index dataset\n    \"\"\"\n    df = session.read_parquet(path=path, schema=cls._schema)\n    return cls(_df=df, _schema=cls._schema)\n
"},{"location":"components/dataset/gene_index/#otg.dataset.gene_index.GeneIndex.from_source","title":"from_source(target_index) classmethod","text":"

Initialise GeneIndex from source dataset.

Parameters:

  • target_index (DataFrame, required): Target index dataframe

Returns:

  • GeneIndex: Gene index dataset

Source code in src/otg/dataset/gene_index.py
@classmethod\ndef from_source(cls: type[GeneIndex], target_index: DataFrame) -> GeneIndex:\n\"\"\"Initialise GeneIndex from source dataset.\n\n    Args:\n        target_index (DataFrame): Target index dataframe\n\n    Returns:\n        GeneIndex: Gene index dataset\n    \"\"\"\n    return cls(\n        _df=target_index.select(\n            f.coalesce(f.col(\"id\"), f.lit(\"unknown\")).alias(\"geneId\"),\n            f.coalesce(f.col(\"genomicLocation.chromosome\"), f.lit(\"unknown\")).alias(\n                \"chromosome\"\n            ),\n            GeneIndex._get_gene_tss(\n                f.col(\"genomicLocation.strand\"),\n                f.col(\"genomicLocation.start\"),\n                f.col(\"genomicLocation.end\"),\n            ).alias(\"tss\"),\n            \"biotype\",\n            \"approvedSymbol\",\n            \"obsoleteSymbols\",\n        )\n    )\n
"},{"location":"components/dataset/gene_index/#otg.dataset.gene_index.GeneIndex.locations_lut","title":"locations_lut()","text":"

Gene location information.

Returns:

  • DataFrame: Gene LUT including genomic location information.

Source code in src/otg/dataset/gene_index.py
def locations_lut(self: GeneIndex) -> DataFrame:\n\"\"\"Gene location information.\n\n    Returns:\n        DataFrame: Gene LUT including genomic location information.\n    \"\"\"\n    return self.df.select(\n        \"geneId\",\n        \"chromosome\",\n        \"tss\",\n    )\n
"},{"location":"components/dataset/gene_index/#otg.dataset.gene_index.GeneIndex.symbols_lut","title":"symbols_lut()","text":"

Gene symbol lookup table.

Pre-processes gene/target dataset to create lookup table of gene symbols, including obsoleted gene symbols.

Returns:

  • DataFrame: Gene LUT for symbol mapping containing geneId and geneSymbol columns.

Source code in src/otg/dataset/gene_index.py
def symbols_lut(self: GeneIndex) -> DataFrame:\n\"\"\"Gene symbol lookup table.\n\n    Pre-processes gene/target dataset to create lookup table of gene symbols, including\n    obsoleted gene symbols.\n\n    Returns:\n        DataFrame: Gene LUT for symbol mapping containing `geneId` and `geneSymbol` columns.\n    \"\"\"\n    return self.df.select(\n        \"geneId\",\n        f.explode(\n            f.array_union(f.array(\"approvedSymbol\"), f.col(\"obsoleteSymbols.label\"))\n        ).alias(\"geneSymbol\"),\n    )\n
"},{"location":"components/dataset/gene_index/#schema","title":"Schema","text":"
root\n |-- geneId: string (nullable = false)\n |-- chromosome: string (nullable = false)\n |-- approvedSymbol: string (nullable = true)\n |-- biotype: string (nullable = true)\n |-- approvedName: string (nullable = true)\n |-- obsoleteSymbols: array (nullable = true)\n |    |-- element: struct (containsNull = true)\n |    |    |-- label: string (nullable = true)\n |    |    |-- source: string (nullable = true)\n |-- tss: long (nullable = true)\n
"},{"location":"components/dataset/intervals/","title":"Intervals","text":"

Bases: Dataset

Intervals dataset links genes to genomic regions based on genome interaction studies.

Source code in src/otg/dataset/intervals.py
@dataclass\nclass Intervals(Dataset):\n\"\"\"Intervals dataset links genes to genomic regions based on genome interaction studies.\"\"\"\n\n    _schema: StructType = parse_spark_schema(\"intervals.json\")\n\n    @classmethod\n    def from_parquet(cls: type[Intervals], session: Session, path: str) -> Intervals:\n\"\"\"Initialise Intervals from parquet file.\n\n        Args:\n            session (Session): ETL session\n            path (str): Path to parquet file\n\n        Returns:\n            Intervals: Intervals dataset\n        \"\"\"\n        df = session.read_parquet(path=path, schema=cls._schema)\n        return cls(_df=df, _schema=cls._schema)\n\n    @classmethod\n    def parse_andersson(\n        cls: type[Intervals],\n        session: Session,\n        path: str,\n        gene_index: GeneIndex,\n        lift: LiftOverSpark,\n    ) -> Intervals:\n\"\"\"Parse Andersson et al. 2014 dataset.\n\n        Args:\n            session (Session): session\n            path (str): Path to dataset\n            gene_index (GeneIndex): Gene index\n            lift (LiftOverSpark): LiftOverSpark instance\n\n        Returns:\n            Intervals: Intervals dataset\n        \"\"\"\n        # Constant values:\n        dataset_name = \"andersson2014\"\n        experiment_type = \"fantom5\"\n        pmid = \"24670763\"\n        bio_feature = \"aggregate\"\n        twosided_threshold = 2.45e6  # <-  this needs to phased out. Filter by percentile instead of absolute value.\n\n        session.logger.info(\"Parsing Andersson 2014 data...\")\n        session.logger.info(f\"Reading data from {path}\")\n\n        # Expected andersson et al. schema:\n        input_schema = t.StructType.fromJson(\n            json.loads(\n                pkg_resources.read_text(schemas, \"andersson2014.json\", encoding=\"utf-8\")\n            )\n        )\n\n        # Read the anderson file:\n        parsed_anderson_df = (\n            session.spark.read.option(\"delimiter\", \"\\t\")\n            .option(\"header\", \"true\")\n            .schema(input_schema)\n            .csv(path)\n            # Parsing score column and casting as float:\n            .withColumn(\"score\", f.col(\"score\").cast(\"float\") / f.lit(1000))\n            # Parsing the 'name' column:\n            .withColumn(\"parsedName\", f.split(f.col(\"name\"), \";\"))\n            .withColumn(\"gene_symbol\", f.col(\"parsedName\")[2])\n            .withColumn(\"location\", f.col(\"parsedName\")[0])\n            .withColumn(\n                \"chrom\",\n                f.regexp_replace(f.split(f.col(\"location\"), \":|-\")[0], \"chr\", \"\"),\n            )\n            .withColumn(\n                \"start\", f.split(f.col(\"location\"), \":|-\")[1].cast(t.IntegerType())\n            )\n            .withColumn(\n                \"end\", f.split(f.col(\"location\"), \":|-\")[2].cast(t.IntegerType())\n            )\n            # Select relevant columns:\n            .select(\"chrom\", \"start\", \"end\", \"gene_symbol\", \"score\")\n            # Drop rows with non-canonical chromosomes:\n            .filter(\n                f.col(\"chrom\").isin([str(x) for x in range(1, 23)] + [\"X\", \"Y\", \"MT\"])\n            )\n            # For each region/gene, keep only one row with the highest score:\n            .groupBy(\"chrom\", \"start\", \"end\", \"gene_symbol\")\n            .agg(f.max(\"score\").alias(\"resourceScore\"))\n            .orderBy(\"chrom\", \"start\")\n        )\n\n        return cls(\n            _df=(\n                # Lift over the 
intervals:\n                lift.convert_intervals(parsed_anderson_df, \"chrom\", \"start\", \"end\")\n                .drop(\"start\", \"end\")\n                .withColumnRenamed(\"mapped_start\", \"start\")\n                .withColumnRenamed(\"mapped_end\", \"end\")\n                .distinct()\n                # Joining with the gene index\n                .alias(\"intervals\")\n                .join(\n                    gene_index.symbols_lut().alias(\"genes\"),\n                    on=[f.col(\"intervals.gene_symbol\") == f.col(\"genes.geneSymbol\")],\n                    how=\"left\",\n                )\n                .filter(\n                    # Drop rows where the gene is not on the same chromosome\n                    (f.col(\"chrom\") == f.col(\"chromosome\"))\n                    # Drop rows where the TSS is far from the start of the region\n                    & (\n                        f.abs((f.col(\"start\") + f.col(\"end\")) / 2 - f.col(\"tss\"))\n                        <= twosided_threshold\n                    )\n                )\n                # Select relevant columns:\n                .select(\n                    \"chromosome\",\n                    \"start\",\n                    \"end\",\n                    \"geneId\",\n                    \"resourceScore\",\n                    f.lit(dataset_name).alias(\"datasourceId\"),\n                    f.lit(experiment_type).alias(\"datatypeId\"),\n                    f.lit(pmid).alias(\"pmid\"),\n                    f.lit(bio_feature).alias(\"biofeature\"),\n                )\n            )\n        )\n\n    @classmethod\n    def parse_javierre(\n        cls: type[Intervals],\n        session: Session,\n        path: str,\n        gene_index: GeneIndex,\n        lift: LiftOverSpark,\n    ) -> Intervals:\n\"\"\"Parse Javierre et al. 2016 dataset.\n\n        Args:\n            session (Session): session\n            path (str): Path to dataset\n            gene_index (GeneIndex): Gene index\n            lift (LiftOverSpark): LiftOverSpark instance\n\n        Returns:\n            Intervals: Javierre et al. 
2016 interval data\n        \"\"\"\n        # Constant values:\n        dataset_name = \"javierre2016\"\n        experiment_type = \"pchic\"\n        pmid = \"27863249\"\n        twosided_threshold = 2.45e6\n\n        session.logger.info(\"Parsing Javierre 2016 data...\")\n        session.logger.info(f\"Reading data from {path}\")\n\n        # Read Javierre data:\n        javierre_raw = (\n            session.spark.read.parquet(path)\n            # Splitting name column into chromosome, start, end, and score:\n            .withColumn(\"name_split\", f.split(f.col(\"name\"), r\":|-|,\"))\n            .withColumn(\n                \"name_chr\",\n                f.regexp_replace(f.col(\"name_split\")[0], \"chr\", \"\").cast(\n                    t.StringType()\n                ),\n            )\n            .withColumn(\"name_start\", f.col(\"name_split\")[1].cast(t.IntegerType()))\n            .withColumn(\"name_end\", f.col(\"name_split\")[2].cast(t.IntegerType()))\n            .withColumn(\"name_score\", f.col(\"name_split\")[3].cast(t.FloatType()))\n            # Cleaning up chromosome:\n            .withColumn(\n                \"chrom\",\n                f.regexp_replace(f.col(\"chrom\"), \"chr\", \"\").cast(t.StringType()),\n            )\n            .drop(\"name_split\", \"name\", \"annotation\")\n            # Keep canonical chromosomes and consistent chromosomes with scores:\n            .filter(\n                (f.col(\"name_score\").isNotNull())\n                & (f.col(\"chrom\") == f.col(\"name_chr\"))\n                & f.col(\"name_chr\").isin(\n                    [f\"{x}\" for x in range(1, 23)] + [\"X\", \"Y\", \"MT\"]\n                )\n            )\n        )\n\n        # Lifting over intervals:\n        javierre_remapped = (\n            javierre_raw\n            # Lifting over to GRCh38 interval 1:\n            .transform(lambda df: lift.convert_intervals(df, \"chrom\", \"start\", \"end\"))\n            .drop(\"start\", \"end\")\n            .withColumnRenamed(\"mapped_chrom\", \"chrom\")\n            .withColumnRenamed(\"mapped_start\", \"start\")\n            .withColumnRenamed(\"mapped_end\", \"end\")\n            # Lifting over interval 2 to GRCh38:\n            .transform(\n                lambda df: lift.convert_intervals(\n                    df, \"name_chr\", \"name_start\", \"name_end\"\n                )\n            )\n            .drop(\"name_start\", \"name_end\")\n            .withColumnRenamed(\"mapped_name_chr\", \"name_chr\")\n            .withColumnRenamed(\"mapped_name_start\", \"name_start\")\n            .withColumnRenamed(\"mapped_name_end\", \"name_end\")\n        )\n\n        # Once the intervals are lifted, extracting the unique intervals:\n        unique_intervals_with_genes = (\n            javierre_remapped.alias(\"intervals\")\n            .select(\n                f.col(\"chrom\"),\n                f.col(\"start\").cast(t.IntegerType()),\n                f.col(\"end\").cast(t.IntegerType()),\n            )\n            .distinct()\n            .join(\n                gene_index.locations_lut().alias(\"genes\"),\n                on=[f.col(\"intervals.chrom\") == f.col(\"genes.chromosome\")],\n                how=\"left\",\n            )\n            # TODO: add filter as part of the join condition\n            .filter(\n                (\n                    (f.col(\"start\") >= f.col(\"genomicLocation.start\"))\n                    & (f.col(\"start\") <= f.col(\"genomicLocation.end\"))\n                )\n                | (\n          
          (f.col(\"end\") >= f.col(\"genomicLocation.start\"))\n                    & (f.col(\"end\") <= f.col(\"genomicLocation.end\"))\n                )\n            )\n            .select(\"chrom\", \"start\", \"end\", \"geneId\", \"tss\")\n        )\n\n        # Joining back the data:\n        return cls(\n            _df=(\n                javierre_remapped.join(\n                    unique_intervals_with_genes,\n                    on=[\"chrom\", \"start\", \"end\"],\n                    how=\"left\",\n                )\n                .filter(\n                    # Drop rows where the TSS is far from the start of the region\n                    f.abs((f.col(\"start\") + f.col(\"end\")) / 2 - f.col(\"tss\"))\n                    <= twosided_threshold\n                )\n                # For each gene, keep only the highest scoring interval:\n                .groupBy(\n                    \"name_chr\", \"name_start\", \"name_end\", \"genes.geneId\", \"bio_feature\"\n                )\n                .agg(f.max(f.col(\"name_score\")).alias(\"resourceScore\"))\n                # Create the output:\n                .select(\n                    f.col(\"name_chr\").alias(\"chromosome\"),\n                    f.col(\"name_start\").alias(\"start\"),\n                    f.col(\"name_end\").alias(\"end\"),\n                    f.col(\"resourceScore\"),\n                    f.col(\"genes.geneId\").alias(\"geneId\"),\n                    f.col(\"bio_feature\").alias(\"biofeature\"),\n                    f.lit(dataset_name).alias(\"datasourceId\"),\n                    f.lit(experiment_type).alias(\"datatypeId\"),\n                    f.lit(pmid).alias(\"pmid\"),\n                )\n            )\n        )\n\n    @classmethod\n    def parse_jung(\n        cls: type[Intervals],\n        session: Session,\n        path: str,\n        gene_index: GeneIndex,\n        lift: LiftOverSpark,\n    ) -> Intervals:\n\"\"\"Parse the Jung et al. 2019 dataset.\n\n        Args:\n            session (Session): session\n            path (str): path to the Jung et al. 
2019 dataset\n            gene_index (GeneIndex): gene index\n            lift (LiftOverSpark): LiftOverSpark instance\n\n        Returns:\n            Intervals: _description_\n        \"\"\"\n        dataset_name = \"javierre2016\"\n        experiment_type = \"pchic\"\n        pmid = \"27863249\"\n\n        session.logger.info(\"Parsing Jung 2019 data...\")\n        session.logger.info(f\"Reading data from {path}\")\n\n        # Read Jung data:\n        jung_raw = (\n            session.spark.read.csv(path, sep=\",\", header=True)\n            .withColumn(\"interval\", f.split(f.col(\"Interacting_fragment\"), r\"\\.\"))\n            .select(\n                # Parsing intervals:\n                f.regexp_replace(f.col(\"interval\")[0], \"chr\", \"\").alias(\"chrom\"),\n                f.col(\"interval\")[1].cast(t.IntegerType()).alias(\"start\"),\n                f.col(\"interval\")[2].cast(t.IntegerType()).alias(\"end\"),\n                # Extract other columns:\n                f.col(\"Promoter\").alias(\"gene_name\"),\n                f.col(\"Tissue_type\").alias(\"tissue\"),\n            )\n        )\n\n        # Lifting over the coordinates:\n        return cls(\n            _df=(\n                jung_raw\n                # Lifting over to GRCh38 interval 1:\n                .transform(\n                    lambda df: lift.convert_intervals(df, \"chrom\", \"start\", \"end\")\n                )\n                .select(\n                    \"chrom\",\n                    f.col(\"mapped_start\").alias(\"start\"),\n                    f.col(\"mapped_end\").alias(\"end\"),\n                    f.explode(f.split(f.col(\"gene_name\"), \";\")).alias(\"gene_name\"),\n                    \"tissue\",\n                )\n                .alias(\"intervals\")\n                # Joining with genes:\n                .join(\n                    gene_index.symbols_lut().alias(\"genes\"),\n                    on=[f.col(\"intervals.gene_name\") == f.col(\"genes.geneSymbol\")],\n                    how=\"inner\",\n                )\n                # Finalize dataset:\n                .select(\n                    \"chromosome\",\n                    \"start\",\n                    \"end\",\n                    \"geneId\",\n                    f.col(\"tissue\").alias(\"biofeature\"),\n                    f.lit(1.0).alias(\"score\"),\n                    f.lit(dataset_name).alias(\"datasourceId\"),\n                    f.lit(experiment_type).alias(\"datatypeId\"),\n                    f.lit(pmid).alias(\"pmid\"),\n                )\n                .drop_duplicates()\n            )\n        )\n\n    @classmethod\n    def parse_thurman(\n        cls: type[Intervals],\n        session: Session,\n        path: str,\n        gene_index: GeneIndex,\n        lift: LiftOverSpark,\n    ) -> Intervals:\n\"\"\"Parse the Thurman et al. 2019 dataset.\n\n        Args:\n            session (Session): session\n            path (str): path to the Thurman et al. 
2019 dataset\n            gene_index (GeneIndex): gene index\n            lift (LiftOverSpark): LiftOverSpark instance\n\n        Returns:\n            Intervals: _description_\n        \"\"\"\n        dataset_name = \"thurman2012\"\n        experiment_type = \"dhscor\"\n        pmid = \"22955617\"\n\n        session.logger.info(\"Parsing Jung 2019 data...\")\n        session.logger.info(f\"Reading data from {path}\")\n\n        # Read Jung data:\n        jung_raw = (\n            session.spark.read.csv(path, sep=\",\", header=True)\n            .withColumn(\"interval\", f.split(f.col(\"Interacting_fragment\"), r\"\\.\"))\n            .select(\n                # Parsing intervals:\n                f.regexp_replace(f.col(\"interval\")[0], \"chr\", \"\").alias(\"chrom\"),\n                f.col(\"interval\")[1].cast(t.IntegerType()).alias(\"start\"),\n                f.col(\"interval\")[2].cast(t.IntegerType()).alias(\"end\"),\n                # Extract other columns:\n                f.col(\"Promoter\").alias(\"gene_name\"),\n                f.col(\"Tissue_type\").alias(\"tissue\"),\n            )\n        )\n\n        return cls(\n            _df=(\n                jung_raw\n                # Lifting over to GRCh38 interval 1:\n                .transform(\n                    lambda df: lift.convert_intervals(df, \"chrom\", \"start\", \"end\")\n                )\n                .select(\n                    \"chrom\",\n                    f.col(\"mapped_start\").alias(\"start\"),\n                    f.col(\"mapped_end\").alias(\"end\"),\n                    f.explode(f.split(f.col(\"gene_name\"), \";\")).alias(\"gene_name\"),\n                    \"tissue\",\n                )\n                .alias(\"intervals\")\n                # Joining with genes:\n                .join(\n                    gene_index.symbols_lut().alias(\"genes\"),\n                    on=[f.col(\"intervals.gene_name\") == f.col(\"genes.geneSymbol\")],\n                    how=\"inner\",\n                )\n                # Finalize dataset:\n                .select(\n                    \"chromosome\",\n                    \"start\",\n                    \"end\",\n                    \"geneId\",\n                    f.col(\"tissue\").alias(\"biofeature\"),\n                    f.lit(1.0).alias(\"score\"),\n                    f.lit(dataset_name).alias(\"datasourceId\"),\n                    f.lit(experiment_type).alias(\"datatypeId\"),\n                    f.lit(pmid).alias(\"pmid\"),\n                )\n                .drop_duplicates()\n            )\n        )\n\n    def v2g(self: Intervals, variant_index: VariantIndex) -> V2G:\n\"\"\"Convert intervals into V2G by intersecting with a variant index.\n\n        Args:\n            variant_index (VariantIndex): Variant index dataset\n\n        Returns:\n            V2G: Variant-to-gene evidence dataset\n        \"\"\"\n        return V2G(\n            _df=(\n                # TODO: We can include the start and end position as part of the `on` clause in the join\n                self.df.alias(\"interval\")\n                .join(\n                    variant_index.df.selectExpr(\n                        \"chromosome as vi_chromosome\", \"variantId\", \"position\"\n                    ).alias(\"vi\"),\n                    on=[\n                        f.col(\"vi.vi_chromosome\") == f.col(\"interval.chromosome\"),\n                        f.col(\"vi.position\").between(\n                            f.col(\"interval.start\"), f.col(\"interval.end\")\n               
         ),\n                    ],\n                    how=\"inner\",\n                )\n                .drop(\"start\", \"end\", \"vi_chromosome\")\n            )\n        )\n
"},{"location":"components/dataset/intervals/#otg.dataset.intervals.Intervals.from_parquet","title":"from_parquet(session, path) classmethod","text":"

Initialise Intervals from parquet file.

Parameters:

    session (Session): ETL session (required)
    path (str): Path to parquet file (required)

Returns:

    Intervals: Intervals dataset

Source code in src/otg/dataset/intervals.py
@classmethod\ndef from_parquet(cls: type[Intervals], session: Session, path: str) -> Intervals:\n\"\"\"Initialise Intervals from parquet file.\n\n    Args:\n        session (Session): ETL session\n        path (str): Path to parquet file\n\n    Returns:\n        Intervals: Intervals dataset\n    \"\"\"\n    df = session.read_parquet(path=path, schema=cls._schema)\n    return cls(_df=df, _schema=cls._schema)\n
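For orientation, here is a minimal usage sketch of the pattern above. The import follows the documented module layout (src/otg/dataset/intervals.py); the parquet location is a placeholder and the session is assumed to be an already-initialised ETL session.

# Hypothetical usage sketch of Intervals.from_parquet; the parquet path is illustrative.
from otg.dataset.intervals import Intervals

def load_intervals(session, path: str = "gs://my-bucket/outputs/intervals"):  # placeholder path
    """Load a previously written Intervals dataset and inspect its schema."""
    intervals = Intervals.from_parquet(session, path)
    # The returned object wraps a Spark DataFrame validated against the intervals schema:
    intervals.df.printSchema()
    return intervals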
"},{"location":"components/dataset/intervals/#otg.dataset.intervals.Intervals.parse_andersson","title":"parse_andersson(session, path, gene_index, lift) classmethod","text":"

Parse Andersson et al. 2014 dataset.

Parameters:

    session (Session): session (required)
    path (str): Path to dataset (required)
    gene_index (GeneIndex): Gene index (required)
    lift (LiftOverSpark): LiftOverSpark instance (required)

Returns:

    Intervals: Intervals dataset

Source code in src/otg/dataset/intervals.py
@classmethod\ndef parse_andersson(\n    cls: type[Intervals],\n    session: Session,\n    path: str,\n    gene_index: GeneIndex,\n    lift: LiftOverSpark,\n) -> Intervals:\n\"\"\"Parse Andersson et al. 2014 dataset.\n\n    Args:\n        session (Session): session\n        path (str): Path to dataset\n        gene_index (GeneIndex): Gene index\n        lift (LiftOverSpark): LiftOverSpark instance\n\n    Returns:\n        Intervals: Intervals dataset\n    \"\"\"\n    # Constant values:\n    dataset_name = \"andersson2014\"\n    experiment_type = \"fantom5\"\n    pmid = \"24670763\"\n    bio_feature = \"aggregate\"\n    twosided_threshold = 2.45e6  # <-  this needs to phased out. Filter by percentile instead of absolute value.\n\n    session.logger.info(\"Parsing Andersson 2014 data...\")\n    session.logger.info(f\"Reading data from {path}\")\n\n    # Expected andersson et al. schema:\n    input_schema = t.StructType.fromJson(\n        json.loads(\n            pkg_resources.read_text(schemas, \"andersson2014.json\", encoding=\"utf-8\")\n        )\n    )\n\n    # Read the anderson file:\n    parsed_anderson_df = (\n        session.spark.read.option(\"delimiter\", \"\\t\")\n        .option(\"header\", \"true\")\n        .schema(input_schema)\n        .csv(path)\n        # Parsing score column and casting as float:\n        .withColumn(\"score\", f.col(\"score\").cast(\"float\") / f.lit(1000))\n        # Parsing the 'name' column:\n        .withColumn(\"parsedName\", f.split(f.col(\"name\"), \";\"))\n        .withColumn(\"gene_symbol\", f.col(\"parsedName\")[2])\n        .withColumn(\"location\", f.col(\"parsedName\")[0])\n        .withColumn(\n            \"chrom\",\n            f.regexp_replace(f.split(f.col(\"location\"), \":|-\")[0], \"chr\", \"\"),\n        )\n        .withColumn(\n            \"start\", f.split(f.col(\"location\"), \":|-\")[1].cast(t.IntegerType())\n        )\n        .withColumn(\n            \"end\", f.split(f.col(\"location\"), \":|-\")[2].cast(t.IntegerType())\n        )\n        # Select relevant columns:\n        .select(\"chrom\", \"start\", \"end\", \"gene_symbol\", \"score\")\n        # Drop rows with non-canonical chromosomes:\n        .filter(\n            f.col(\"chrom\").isin([str(x) for x in range(1, 23)] + [\"X\", \"Y\", \"MT\"])\n        )\n        # For each region/gene, keep only one row with the highest score:\n        .groupBy(\"chrom\", \"start\", \"end\", \"gene_symbol\")\n        .agg(f.max(\"score\").alias(\"resourceScore\"))\n        .orderBy(\"chrom\", \"start\")\n    )\n\n    return cls(\n        _df=(\n            # Lift over the intervals:\n            lift.convert_intervals(parsed_anderson_df, \"chrom\", \"start\", \"end\")\n            .drop(\"start\", \"end\")\n            .withColumnRenamed(\"mapped_start\", \"start\")\n            .withColumnRenamed(\"mapped_end\", \"end\")\n            .distinct()\n            # Joining with the gene index\n            .alias(\"intervals\")\n            .join(\n                gene_index.symbols_lut().alias(\"genes\"),\n                on=[f.col(\"intervals.gene_symbol\") == f.col(\"genes.geneSymbol\")],\n                how=\"left\",\n            )\n            .filter(\n                # Drop rows where the gene is not on the same chromosome\n                (f.col(\"chrom\") == f.col(\"chromosome\"))\n                # Drop rows where the TSS is far from the start of the region\n                & (\n                    f.abs((f.col(\"start\") + f.col(\"end\")) / 2 - f.col(\"tss\"))\n                  
  <= twosided_threshold\n                )\n            )\n            # Select relevant columns:\n            .select(\n                \"chromosome\",\n                \"start\",\n                \"end\",\n                \"geneId\",\n                \"resourceScore\",\n                f.lit(dataset_name).alias(\"datasourceId\"),\n                f.lit(experiment_type).alias(\"datatypeId\"),\n                f.lit(pmid).alias(\"pmid\"),\n                f.lit(bio_feature).alias(\"biofeature\"),\n            )\n        )\n    )\n
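The same calling convention applies to all of the interval parsers documented on this page. A short sketch for the Andersson source follows; the GeneIndex and LiftOverSpark objects are assumed to have been built elsewhere, and the input location is a placeholder, not the real file path.

# Hypothetical invocation of the Andersson 2014 parser; gene_index (GeneIndex) and
# lift (LiftOverSpark) are assumed to be constructed upstream, the path is a placeholder.
from otg.dataset.intervals import Intervals

def build_andersson_intervals(session, gene_index, lift):
    path = "gs://my-bucket/andersson2014/enhancer_tss_associations.bed"  # placeholder location
    return Intervals.parse_andersson(session, path, gene_index, lift)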
"},{"location":"components/dataset/intervals/#otg.dataset.intervals.Intervals.parse_javierre","title":"parse_javierre(session, path, gene_index, lift) classmethod","text":"

Parse Javierre et al. 2016 dataset.

Parameters:

    session (Session): session (required)
    path (str): Path to dataset (required)
    gene_index (GeneIndex): Gene index (required)
    lift (LiftOverSpark): LiftOverSpark instance (required)

Returns:

    Intervals: Javierre et al. 2016 interval data

Source code in src/otg/dataset/intervals.py
@classmethod\ndef parse_javierre(\n    cls: type[Intervals],\n    session: Session,\n    path: str,\n    gene_index: GeneIndex,\n    lift: LiftOverSpark,\n) -> Intervals:\n\"\"\"Parse Javierre et al. 2016 dataset.\n\n    Args:\n        session (Session): session\n        path (str): Path to dataset\n        gene_index (GeneIndex): Gene index\n        lift (LiftOverSpark): LiftOverSpark instance\n\n    Returns:\n        Intervals: Javierre et al. 2016 interval data\n    \"\"\"\n    # Constant values:\n    dataset_name = \"javierre2016\"\n    experiment_type = \"pchic\"\n    pmid = \"27863249\"\n    twosided_threshold = 2.45e6\n\n    session.logger.info(\"Parsing Javierre 2016 data...\")\n    session.logger.info(f\"Reading data from {path}\")\n\n    # Read Javierre data:\n    javierre_raw = (\n        session.spark.read.parquet(path)\n        # Splitting name column into chromosome, start, end, and score:\n        .withColumn(\"name_split\", f.split(f.col(\"name\"), r\":|-|,\"))\n        .withColumn(\n            \"name_chr\",\n            f.regexp_replace(f.col(\"name_split\")[0], \"chr\", \"\").cast(\n                t.StringType()\n            ),\n        )\n        .withColumn(\"name_start\", f.col(\"name_split\")[1].cast(t.IntegerType()))\n        .withColumn(\"name_end\", f.col(\"name_split\")[2].cast(t.IntegerType()))\n        .withColumn(\"name_score\", f.col(\"name_split\")[3].cast(t.FloatType()))\n        # Cleaning up chromosome:\n        .withColumn(\n            \"chrom\",\n            f.regexp_replace(f.col(\"chrom\"), \"chr\", \"\").cast(t.StringType()),\n        )\n        .drop(\"name_split\", \"name\", \"annotation\")\n        # Keep canonical chromosomes and consistent chromosomes with scores:\n        .filter(\n            (f.col(\"name_score\").isNotNull())\n            & (f.col(\"chrom\") == f.col(\"name_chr\"))\n            & f.col(\"name_chr\").isin(\n                [f\"{x}\" for x in range(1, 23)] + [\"X\", \"Y\", \"MT\"]\n            )\n        )\n    )\n\n    # Lifting over intervals:\n    javierre_remapped = (\n        javierre_raw\n        # Lifting over to GRCh38 interval 1:\n        .transform(lambda df: lift.convert_intervals(df, \"chrom\", \"start\", \"end\"))\n        .drop(\"start\", \"end\")\n        .withColumnRenamed(\"mapped_chrom\", \"chrom\")\n        .withColumnRenamed(\"mapped_start\", \"start\")\n        .withColumnRenamed(\"mapped_end\", \"end\")\n        # Lifting over interval 2 to GRCh38:\n        .transform(\n            lambda df: lift.convert_intervals(\n                df, \"name_chr\", \"name_start\", \"name_end\"\n            )\n        )\n        .drop(\"name_start\", \"name_end\")\n        .withColumnRenamed(\"mapped_name_chr\", \"name_chr\")\n        .withColumnRenamed(\"mapped_name_start\", \"name_start\")\n        .withColumnRenamed(\"mapped_name_end\", \"name_end\")\n    )\n\n    # Once the intervals are lifted, extracting the unique intervals:\n    unique_intervals_with_genes = (\n        javierre_remapped.alias(\"intervals\")\n        .select(\n            f.col(\"chrom\"),\n            f.col(\"start\").cast(t.IntegerType()),\n            f.col(\"end\").cast(t.IntegerType()),\n        )\n        .distinct()\n        .join(\n            gene_index.locations_lut().alias(\"genes\"),\n            on=[f.col(\"intervals.chrom\") == f.col(\"genes.chromosome\")],\n            how=\"left\",\n        )\n        # TODO: add filter as part of the join condition\n        .filter(\n            (\n                (f.col(\"start\") >= 
f.col(\"genomicLocation.start\"))\n                & (f.col(\"start\") <= f.col(\"genomicLocation.end\"))\n            )\n            | (\n                (f.col(\"end\") >= f.col(\"genomicLocation.start\"))\n                & (f.col(\"end\") <= f.col(\"genomicLocation.end\"))\n            )\n        )\n        .select(\"chrom\", \"start\", \"end\", \"geneId\", \"tss\")\n    )\n\n    # Joining back the data:\n    return cls(\n        _df=(\n            javierre_remapped.join(\n                unique_intervals_with_genes,\n                on=[\"chrom\", \"start\", \"end\"],\n                how=\"left\",\n            )\n            .filter(\n                # Drop rows where the TSS is far from the start of the region\n                f.abs((f.col(\"start\") + f.col(\"end\")) / 2 - f.col(\"tss\"))\n                <= twosided_threshold\n            )\n            # For each gene, keep only the highest scoring interval:\n            .groupBy(\n                \"name_chr\", \"name_start\", \"name_end\", \"genes.geneId\", \"bio_feature\"\n            )\n            .agg(f.max(f.col(\"name_score\")).alias(\"resourceScore\"))\n            # Create the output:\n            .select(\n                f.col(\"name_chr\").alias(\"chromosome\"),\n                f.col(\"name_start\").alias(\"start\"),\n                f.col(\"name_end\").alias(\"end\"),\n                f.col(\"resourceScore\"),\n                f.col(\"genes.geneId\").alias(\"geneId\"),\n                f.col(\"bio_feature\").alias(\"biofeature\"),\n                f.lit(dataset_name).alias(\"datasourceId\"),\n                f.lit(experiment_type).alias(\"datatypeId\"),\n                f.lit(pmid).alias(\"pmid\"),\n            )\n        )\n    )\n
"},{"location":"components/dataset/intervals/#otg.dataset.intervals.Intervals.parse_jung","title":"parse_jung(session, path, gene_index, lift) classmethod","text":"

Parse the Jung et al. 2019 dataset.

Parameters:

    session (Session): session (required)
    path (str): path to the Jung et al. 2019 dataset (required)
    gene_index (GeneIndex): gene index (required)
    lift (LiftOverSpark): LiftOverSpark instance (required)

Returns:

    Intervals: Jung et al. 2019 interval data

Source code in src/otg/dataset/intervals.py
@classmethod\ndef parse_jung(\n    cls: type[Intervals],\n    session: Session,\n    path: str,\n    gene_index: GeneIndex,\n    lift: LiftOverSpark,\n) -> Intervals:\n\"\"\"Parse the Jung et al. 2019 dataset.\n\n    Args:\n        session (Session): session\n        path (str): path to the Jung et al. 2019 dataset\n        gene_index (GeneIndex): gene index\n        lift (LiftOverSpark): LiftOverSpark instance\n\n    Returns:\n        Intervals: Jung et al. 2019 interval data\n    \"\"\"\n    # Constant values (Jung et al. 2019, promoter capture Hi-C):\n    dataset_name = \"jung2019\"\n    experiment_type = \"pchic\"\n    pmid = \"31501517\"\n\n    session.logger.info(\"Parsing Jung 2019 data...\")\n    session.logger.info(f\"Reading data from {path}\")\n\n    # Read Jung data:\n    jung_raw = (\n        session.spark.read.csv(path, sep=\",\", header=True)\n        .withColumn(\"interval\", f.split(f.col(\"Interacting_fragment\"), r\"\\.\"))\n        .select(\n            # Parsing intervals:\n            f.regexp_replace(f.col(\"interval\")[0], \"chr\", \"\").alias(\"chrom\"),\n            f.col(\"interval\")[1].cast(t.IntegerType()).alias(\"start\"),\n            f.col(\"interval\")[2].cast(t.IntegerType()).alias(\"end\"),\n            # Extract other columns:\n            f.col(\"Promoter\").alias(\"gene_name\"),\n            f.col(\"Tissue_type\").alias(\"tissue\"),\n        )\n    )\n\n    # Lifting over the coordinates:\n    return cls(\n        _df=(\n            jung_raw\n            # Lifting over to GRCh38 interval 1:\n            .transform(\n                lambda df: lift.convert_intervals(df, \"chrom\", \"start\", \"end\")\n            )\n            .select(\n                \"chrom\",\n                f.col(\"mapped_start\").alias(\"start\"),\n                f.col(\"mapped_end\").alias(\"end\"),\n                f.explode(f.split(f.col(\"gene_name\"), \";\")).alias(\"gene_name\"),\n                \"tissue\",\n            )\n            .alias(\"intervals\")\n            # Joining with genes:\n            .join(\n                gene_index.symbols_lut().alias(\"genes\"),\n                on=[f.col(\"intervals.gene_name\") == f.col(\"genes.geneSymbol\")],\n                how=\"inner\",\n            )\n            # Finalize dataset:\n            .select(\n                \"chromosome\",\n                \"start\",\n                \"end\",\n                \"geneId\",\n                f.col(\"tissue\").alias(\"biofeature\"),\n                f.lit(1.0).alias(\"score\"),\n                f.lit(dataset_name).alias(\"datasourceId\"),\n                f.lit(experiment_type).alias(\"datatypeId\"),\n                f.lit(pmid).alias(\"pmid\"),\n            )\n            .drop_duplicates()\n        )\n    )\n
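The Promoter column in this source can list several gene symbols separated by ";"; the split/explode step above fans each one out into its own row before joining against the gene index. A standalone PySpark sketch of that step, on toy data:

# Standalone illustration of the split + explode used for the Promoter column (toy data).
from pyspark.sql import SparkSession
import pyspark.sql.functions as f

spark = SparkSession.builder.master("local[1]").getOrCreate()
df = spark.createDataFrame([("1", "GENE_A;GENE_B", "blood")], ["chrom", "gene_name", "tissue"])
df.select(
    "chrom",
    f.explode(f.split(f.col("gene_name"), ";")).alias("gene_name"),  # one row per gene symbol
    "tissue",
).show()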
"},{"location":"components/dataset/intervals/#otg.dataset.intervals.Intervals.parse_thurman","title":"parse_thurman(session, path, gene_index, lift) classmethod","text":"

Parse the Thurman et al. 2012 dataset.

Parameters:

    session (Session): session (required)
    path (str): path to the Thurman et al. 2012 dataset (required)
    gene_index (GeneIndex): gene index (required)
    lift (LiftOverSpark): LiftOverSpark instance (required)

Returns:

    Intervals: Thurman et al. 2012 interval data

Source code in src/otg/dataset/intervals.py
@classmethod\ndef parse_thurman(\n    cls: type[Intervals],\n    session: Session,\n    path: str,\n    gene_index: GeneIndex,\n    lift: LiftOverSpark,\n) -> Intervals:\n\"\"\"Parse the Thurman et al. 2012 dataset.\n\n    Args:\n        session (Session): session\n        path (str): path to the Thurman et al. 2012 dataset\n        gene_index (GeneIndex): gene index\n        lift (LiftOverSpark): LiftOverSpark instance\n\n    Returns:\n        Intervals: Thurman et al. 2012 interval data\n    \"\"\"\n    # Constant values (Thurman et al. 2012, DHS correlation):\n    dataset_name = \"thurman2012\"\n    experiment_type = \"dhscor\"\n    pmid = \"22955617\"\n\n    session.logger.info(\"Parsing Thurman 2012 data...\")\n    session.logger.info(f\"Reading data from {path}\")\n\n    # Read Thurman data:\n    thurman_raw = (\n        session.spark.read.csv(path, sep=\",\", header=True)\n        .withColumn(\"interval\", f.split(f.col(\"Interacting_fragment\"), r\"\\.\"))\n        .select(\n            # Parsing intervals:\n            f.regexp_replace(f.col(\"interval\")[0], \"chr\", \"\").alias(\"chrom\"),\n            f.col(\"interval\")[1].cast(t.IntegerType()).alias(\"start\"),\n            f.col(\"interval\")[2].cast(t.IntegerType()).alias(\"end\"),\n            # Extract other columns:\n            f.col(\"Promoter\").alias(\"gene_name\"),\n            f.col(\"Tissue_type\").alias(\"tissue\"),\n        )\n    )\n\n    return cls(\n        _df=(\n            thurman_raw\n            # Lifting over to GRCh38 interval 1:\n            .transform(\n                lambda df: lift.convert_intervals(df, \"chrom\", \"start\", \"end\")\n            )\n            .select(\n                \"chrom\",\n                f.col(\"mapped_start\").alias(\"start\"),\n                f.col(\"mapped_end\").alias(\"end\"),\n                f.explode(f.split(f.col(\"gene_name\"), \";\")).alias(\"gene_name\"),\n                \"tissue\",\n            )\n            .alias(\"intervals\")\n            # Joining with genes:\n            .join(\n                gene_index.symbols_lut().alias(\"genes\"),\n                on=[f.col(\"intervals.gene_name\") == f.col(\"genes.geneSymbol\")],\n                how=\"inner\",\n            )\n            # Finalize dataset:\n            .select(\n                \"chromosome\",\n                \"start\",\n                \"end\",\n                \"geneId\",\n                f.col(\"tissue\").alias(\"biofeature\"),\n                f.lit(1.0).alias(\"score\"),\n                f.lit(dataset_name).alias(\"datasourceId\"),\n                f.lit(experiment_type).alias(\"datatypeId\"),\n                f.lit(pmid).alias(\"pmid\"),\n            )\n            .drop_duplicates()\n        )\n    )\n
"},{"location":"components/dataset/intervals/#otg.dataset.intervals.Intervals.v2g","title":"v2g(variant_index)","text":"

Convert intervals into V2G by intersecting with a variant index.

Parameters:

    variant_index (VariantIndex): Variant index dataset (required)

Returns:

    V2G: Variant-to-gene evidence dataset

Source code in src/otg/dataset/intervals.py
def v2g(self: Intervals, variant_index: VariantIndex) -> V2G:\n\"\"\"Convert intervals into V2G by intersecting with a variant index.\n\n    Args:\n        variant_index (VariantIndex): Variant index dataset\n\n    Returns:\n        V2G: Variant-to-gene evidence dataset\n    \"\"\"\n    return V2G(\n        _df=(\n            # TODO: We can include the start and end position as part of the `on` clause in the join\n            self.df.alias(\"interval\")\n            .join(\n                variant_index.df.selectExpr(\n                    \"chromosome as vi_chromosome\", \"variantId\", \"position\"\n                ).alias(\"vi\"),\n                on=[\n                    f.col(\"vi.vi_chromosome\") == f.col(\"interval.chromosome\"),\n                    f.col(\"vi.position\").between(\n                        f.col(\"interval.start\"), f.col(\"interval.end\")\n                    ),\n                ],\n                how=\"inner\",\n            )\n            .drop(\"start\", \"end\", \"vi_chromosome\")\n        )\n    )\n
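A hypothetical end-to-end sketch of this step, intersecting intervals with a variant index to obtain variant-to-gene evidence. The VariantIndex import path, its from_parquet constructor, and both parquet locations are assumptions made for illustration only.

# Hypothetical sketch: intersect intervals with a variant index to produce V2G evidence.
from otg.dataset.intervals import Intervals
from otg.dataset.variant_index import VariantIndex  # assumed module path

def intervals_to_v2g(session):
    intervals = Intervals.from_parquet(session, "gs://my-bucket/intervals")            # placeholder
    variant_index = VariantIndex.from_parquet(session, "gs://my-bucket/variant_index")  # placeholder
    v2g = intervals.v2g(variant_index)
    return v2g.df  # one row per variant falling inside an interval, linked to a gene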
"},{"location":"components/dataset/intervals/#schema","title":"Schema","text":"
root\n |-- chromosome: string (nullable = false)\n |-- start: string (nullable = false)\n |-- end: string (nullable = false)\n |-- geneId: string (nullable = false)\n |-- resourceScore: double (nullable = true)\n |-- score: double (nullable = true)\n |-- datasourceId: string (nullable = false)\n |-- datatypeId: string (nullable = false)\n |-- pmid: string (nullable = true)\n |-- biofeature: string (nullable = true)\n
"},{"location":"components/dataset/ld_index/","title":"LD index","text":"

Bases: Dataset

Dataset to index access to LD information from GnomAD.

Source code in src/otg/dataset/ld_index.py
@dataclass\nclass LDIndex(Dataset):\n\"\"\"Dataset to index access to LD information from GnomAD.\"\"\"\n\n    _schema: StructType = parse_spark_schema(\"ld_index.json\")\n\n    @staticmethod\n    def _liftover_loci(variant_index: Table, grch37_to_grch38_chain_path: str) -> Table:\n\"\"\"Liftover hail table with LD variant index.\n\n        Args:\n            variant_index (Table): LD variant indexes\n            grch37_to_grch38_chain_path (str): Path to chain file\n\n        Returns:\n            Table: LD variant index with locus 38 coordinates\n        \"\"\"\n        if not hl.get_reference(\"GRCh37\").has_liftover(\"GRCh38\"):\n            rg37 = hl.get_reference(\"GRCh37\")\n            rg38 = hl.get_reference(\"GRCh38\")\n            rg37.add_liftover(grch37_to_grch38_chain_path, rg38)\n\n        return variant_index.annotate(\n            locus38=hl.liftover(variant_index.locus, \"GRCh38\")\n        )\n\n    @staticmethod\n    def _interval_start(contig: Column, position: Column, ld_radius: int) -> Column:\n\"\"\"Start position of the interval based on available positions.\n\n        Args:\n            contig (Column): genomic contigs\n            position (Column): genomic positions\n            ld_radius (int): bp around locus\n\n        Returns:\n            Column: Position of the locus starting the interval\n\n        Examples:\n            >>> d = [\n            ...     {\"contig\": \"21\", \"pos\": 100},\n            ...     {\"contig\": \"21\", \"pos\": 200},\n            ...     {\"contig\": \"21\", \"pos\": 300},\n            ... ]\n            >>> df = spark.createDataFrame(d)\n            >>> df.withColumn(\"start\", LDIndex._interval_start(f.col(\"contig\"), f.col(\"pos\"), 100)).show()\n            +------+---+-----+\n            |contig|pos|start|\n            +------+---+-----+\n            |    21|100|  100|\n            |    21|200|  100|\n            |    21|300|  200|\n            +------+---+-----+\n            <BLANKLINE>\n\n        \"\"\"\n        w = (\n            Window.partitionBy(contig)\n            .orderBy(position)\n            .rangeBetween(-ld_radius, ld_radius)\n        )\n        return f.min(position).over(w)\n\n    @staticmethod\n    def _interval_stop(contig: Column, position: Column, ld_radius: int) -> Column:\n\"\"\"Stop position of the interval based on available positions.\n\n        Args:\n            contig (Column): genomic contigs\n            position (Column): genomic positions\n            ld_radius (int): bp around locus\n\n        Returns:\n            Column: Position of the locus at the end of the interval\n\n        Examples:\n            >>> d = [\n            ...     {\"contig\": \"21\", \"pos\": 100},\n            ...     {\"contig\": \"21\", \"pos\": 200},\n            ...     {\"contig\": \"21\", \"pos\": 300},\n            ... 
]\n            >>> df = spark.createDataFrame(d)\n            >>> df.withColumn(\"start\", LDIndex._interval_stop(f.col(\"contig\"), f.col(\"pos\"), 100)).show()\n            +------+---+-----+\n            |contig|pos|start|\n            +------+---+-----+\n            |    21|100|  200|\n            |    21|200|  300|\n            |    21|300|  300|\n            +------+---+-----+\n            <BLANKLINE>\n\n        \"\"\"\n        w = (\n            Window.partitionBy(contig)\n            .orderBy(position)\n            .rangeBetween(-ld_radius, ld_radius)\n        )\n        return f.max(position).over(w)\n\n    @classmethod\n    def from_parquet(cls: type[LDIndex], session: Session, path: str) -> LDIndex:\n\"\"\"Initialise LD index from parquet file.\n\n        Args:\n            session (Session): ETL session\n            path (str): Path to parquet file\n\n        Returns:\n            LDIndex: LD index dataset\n        \"\"\"\n        df = session.read_parquet(path=path, schema=cls._schema)\n        return cls(_df=df, _schema=cls._schema)\n\n    @classmethod\n    def create(\n        cls: type[LDIndex],\n        pop_ldindex_path: str,\n        ld_radius: int,\n        grch37_to_grch38_chain_path: str,\n    ) -> LDIndex:\n\"\"\"Parse LD index and annotate with interval start and stop.\n\n        Args:\n            pop_ldindex_path (str): path to gnomAD LD index\n            ld_radius (int): radius\n            grch37_to_grch38_chain_path (str): path to chain file for liftover\n\n        Returns:\n            LDIndex: Created GnomAD LD index\n        \"\"\"\n        ld_index = hl.read_table(pop_ldindex_path).naive_coalesce(400)\n        ld_index_38 = LDIndex._liftover_loci(ld_index, grch37_to_grch38_chain_path)\n\n        return cls(\n            _df=ld_index_38.to_spark()\n            .filter(f.col(\"`locus38.position`\").isNotNull())\n            .select(\n                f.coalesce(f.col(\"idx\"), f.monotonically_increasing_id()).alias(\"idx\"),\n                f.coalesce(\n                    f.regexp_replace(\"`locus38.contig`\", \"chr\", \"\"), f.lit(\"unknown\")\n                ).alias(\"chromosome\"),\n                f.coalesce(f.col(\"`locus38.position`\"), f.lit(-1)).alias(\"position\"),\n                f.coalesce(f.col(\"`alleles`\").getItem(0), f.lit(\"?\")).alias(\n                    \"referenceAllele\"\n                ),\n                f.coalesce(f.col(\"`alleles`\").getItem(1), f.lit(\"?\")).alias(\n                    \"alternateAllele\"\n                ),\n            )\n            # Convert gnomad position to Ensembl position (1-based for indels)\n            .withColumn(\n                \"position\",\n                convert_gnomad_position_to_ensembl(\n                    f.col(\"position\"),\n                    f.col(\"referenceAllele\"),\n                    f.col(\"alternateAllele\"),\n                ),\n            )\n            .withColumn(\n                \"variantId\",\n                f.concat_ws(\n                    \"_\",\n                    f.col(\"chromosome\"),\n                    f.col(\"position\"),\n                    f.col(\"referenceAllele\"),\n                    f.col(\"alternateAllele\"),\n                ),\n            )\n            # Filter out variants mapping to several indices due to liftover\n            .withColumn(\"count\", f.count(\"*\").over(Window.partitionBy([\"variantId\"])))\n            .filter(f.col(\"count\") == 1)\n            .drop(\"count\")\n            .withColumn(\"start_idx\", 
f.lit(None).cast(t.LongType()))\n            .withColumn(\"stop_idx\", f.lit(None).cast(t.LongType()))\n            .repartition(400, \"chromosome\")\n            .sortWithinPartitions(\"position\")\n            .persist()\n        ).annotate_index_intervals(ld_radius)\n\n    def annotate_index_intervals(self: LDIndex, ld_radius: int) -> LDIndex:\n\"\"\"Annotate LD index with indices starting and stopping at a given interval.\n\n        Args:\n            ld_radius (int): radius around each position\n\n        Returns:\n            LDIndex: including `start_idx` and `stop_idx` columns\n        \"\"\"\n        index_with_positions = (\n            self._df.drop(\"start_idx\", \"stop_idx\")\n            .select(\n                \"*\",\n                LDIndex._interval_start(\n                    contig=f.col(\"chromosome\"),\n                    position=f.col(\"position\"),\n                    ld_radius=ld_radius,\n                ).alias(\"start_pos\"),\n                LDIndex._interval_stop(\n                    contig=f.col(\"chromosome\"),\n                    position=f.col(\"position\"),\n                    ld_radius=ld_radius,\n                ).alias(\"stop_pos\"),\n            )\n            .persist()\n        )\n\n        self.df = (\n            index_with_positions.join(\n                (\n                    index_with_positions\n                    # Given the multiple variants with the same chromosome/position can have different indices, filter for the lowest index:\n                    .transform(\n                        lambda df: get_record_with_minimum_value(\n                            df, [\"chromosome\", \"position\"], \"idx\"\n                        )\n                    ).select(\n                        \"chromosome\",\n                        f.col(\"position\").alias(\"start_pos\"),\n                        f.col(\"idx\").alias(\"start_idx\"),\n                    )\n                ),\n                on=[\"chromosome\", \"start_pos\"],\n            )\n            .join(\n                (\n                    index_with_positions\n                    # Given the multiple variants with the same chromosome/position can have different indices, filter for the highest index:\n                    .transform(\n                        lambda df: get_record_with_maximum_value(\n                            df, [\"chromosome\", \"position\"], \"idx\"\n                        )\n                    ).select(\n                        \"chromosome\",\n                        f.col(\"position\").alias(\"stop_pos\"),\n                        f.col(\"idx\").alias(\"stop_idx\"),\n                    )\n                ),\n                on=[\"chromosome\", \"stop_pos\"],\n            )\n            # Filter out variants for which start idx > stop idx due to liftover\n            .filter(f.col(\"start_idx\") < f.col(\"stop_idx\"))\n            .drop(\"start_pos\", \"stop_pos\")\n        )\n\n        return self\n
"},{"location":"components/dataset/ld_index/#otg.dataset.ld_index.LDIndex.annotate_index_intervals","title":"annotate_index_intervals(ld_radius)","text":"

Annotate LD index with indices starting and stopping at a given interval.

Parameters:

    ld_radius (int): radius around each position (required)

Returns:

    LDIndex: including start_idx and stop_idx columns

Source code in src/otg/dataset/ld_index.py
def annotate_index_intervals(self: LDIndex, ld_radius: int) -> LDIndex:\n\"\"\"Annotate LD index with indices starting and stopping at a given interval.\n\n    Args:\n        ld_radius (int): radius around each position\n\n    Returns:\n        LDIndex: including `start_idx` and `stop_idx` columns\n    \"\"\"\n    index_with_positions = (\n        self._df.drop(\"start_idx\", \"stop_idx\")\n        .select(\n            \"*\",\n            LDIndex._interval_start(\n                contig=f.col(\"chromosome\"),\n                position=f.col(\"position\"),\n                ld_radius=ld_radius,\n            ).alias(\"start_pos\"),\n            LDIndex._interval_stop(\n                contig=f.col(\"chromosome\"),\n                position=f.col(\"position\"),\n                ld_radius=ld_radius,\n            ).alias(\"stop_pos\"),\n        )\n        .persist()\n    )\n\n    self.df = (\n        index_with_positions.join(\n            (\n                index_with_positions\n                # Given the multiple variants with the same chromosome/position can have different indices, filter for the lowest index:\n                .transform(\n                    lambda df: get_record_with_minimum_value(\n                        df, [\"chromosome\", \"position\"], \"idx\"\n                    )\n                ).select(\n                    \"chromosome\",\n                    f.col(\"position\").alias(\"start_pos\"),\n                    f.col(\"idx\").alias(\"start_idx\"),\n                )\n            ),\n            on=[\"chromosome\", \"start_pos\"],\n        )\n        .join(\n            (\n                index_with_positions\n                # Given the multiple variants with the same chromosome/position can have different indices, filter for the highest index:\n                .transform(\n                    lambda df: get_record_with_maximum_value(\n                        df, [\"chromosome\", \"position\"], \"idx\"\n                    )\n                ).select(\n                    \"chromosome\",\n                    f.col(\"position\").alias(\"stop_pos\"),\n                    f.col(\"idx\").alias(\"stop_idx\"),\n                )\n            ),\n            on=[\"chromosome\", \"stop_pos\"],\n        )\n        # Filter out variants for which start idx > stop idx due to liftover\n        .filter(f.col(\"start_idx\") < f.col(\"stop_idx\"))\n        .drop(\"start_pos\", \"stop_pos\")\n    )\n\n    return self\n
"},{"location":"components/dataset/ld_index/#otg.dataset.ld_index.LDIndex.create","title":"create(pop_ldindex_path, ld_radius, grch37_to_grch38_chain_path) classmethod","text":"

Parse LD index and annotate with interval start and stop.

Parameters:

    pop_ldindex_path (str): path to gnomAD LD index (required)
    ld_radius (int): radius in base pairs around each locus (required)
    grch37_to_grch38_chain_path (str): path to chain file for liftover (required)

Returns:

    LDIndex: Created GnomAD LD index

Source code in src/otg/dataset/ld_index.py
@classmethod\ndef create(\n    cls: type[LDIndex],\n    pop_ldindex_path: str,\n    ld_radius: int,\n    grch37_to_grch38_chain_path: str,\n) -> LDIndex:\n\"\"\"Parse LD index and annotate with interval start and stop.\n\n    Args:\n        pop_ldindex_path (str): path to gnomAD LD index\n        ld_radius (int): radius\n        grch37_to_grch38_chain_path (str): path to chain file for liftover\n\n    Returns:\n        LDIndex: Created GnomAD LD index\n    \"\"\"\n    ld_index = hl.read_table(pop_ldindex_path).naive_coalesce(400)\n    ld_index_38 = LDIndex._liftover_loci(ld_index, grch37_to_grch38_chain_path)\n\n    return cls(\n        _df=ld_index_38.to_spark()\n        .filter(f.col(\"`locus38.position`\").isNotNull())\n        .select(\n            f.coalesce(f.col(\"idx\"), f.monotonically_increasing_id()).alias(\"idx\"),\n            f.coalesce(\n                f.regexp_replace(\"`locus38.contig`\", \"chr\", \"\"), f.lit(\"unknown\")\n            ).alias(\"chromosome\"),\n            f.coalesce(f.col(\"`locus38.position`\"), f.lit(-1)).alias(\"position\"),\n            f.coalesce(f.col(\"`alleles`\").getItem(0), f.lit(\"?\")).alias(\n                \"referenceAllele\"\n            ),\n            f.coalesce(f.col(\"`alleles`\").getItem(1), f.lit(\"?\")).alias(\n                \"alternateAllele\"\n            ),\n        )\n        # Convert gnomad position to Ensembl position (1-based for indels)\n        .withColumn(\n            \"position\",\n            convert_gnomad_position_to_ensembl(\n                f.col(\"position\"),\n                f.col(\"referenceAllele\"),\n                f.col(\"alternateAllele\"),\n            ),\n        )\n        .withColumn(\n            \"variantId\",\n            f.concat_ws(\n                \"_\",\n                f.col(\"chromosome\"),\n                f.col(\"position\"),\n                f.col(\"referenceAllele\"),\n                f.col(\"alternateAllele\"),\n            ),\n        )\n        # Filter out variants mapping to several indices due to liftover\n        .withColumn(\"count\", f.count(\"*\").over(Window.partitionBy([\"variantId\"])))\n        .filter(f.col(\"count\") == 1)\n        .drop(\"count\")\n        .withColumn(\"start_idx\", f.lit(None).cast(t.LongType()))\n        .withColumn(\"stop_idx\", f.lit(None).cast(t.LongType()))\n        .repartition(400, \"chromosome\")\n        .sortWithinPartitions(\"position\")\n        .persist()\n    ).annotate_index_intervals(ld_radius)\n
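A sketch of how the index might be built end to end. The gnomAD table location, chain file path, and radius below are placeholders chosen for illustration, not values prescribed by the pipeline.

# Hypothetical driver for building the LD index; all paths and the radius are placeholders.
from otg.dataset.ld_index import LDIndex

ld_index = LDIndex.create(
    pop_ldindex_path="gs://my-bucket/gnomad_ld/nfe.ld.variant_indices.ht",       # placeholder gnomAD LD index
    ld_radius=500_000,                                                           # placeholder radius in bp
    grch37_to_grch38_chain_path="gs://my-bucket/grch37_to_grch38.over.chain.gz", # placeholder chain file
)
# start_idx and stop_idx now bound, for each variant, the index range within the LD radius:
ld_index.df.select("variantId", "idx", "start_idx", "stop_idx").show(5)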
"},{"location":"components/dataset/ld_index/#otg.dataset.ld_index.LDIndex.from_parquet","title":"from_parquet(session, path) classmethod","text":"

Initialise LD index from parquet file.

Parameters:

    session (Session): ETL session (required)
    path (str): Path to parquet file (required)

Returns:

    LDIndex: LD index dataset

Source code in src/otg/dataset/ld_index.py
@classmethod\ndef from_parquet(cls: type[LDIndex], session: Session, path: str) -> LDIndex:\n\"\"\"Initialise LD index from parquet file.\n\n    Args:\n        session (Session): ETL session\n        path (str): Path to parquet file\n\n    Returns:\n        LDIndex: LD index dataset\n    \"\"\"\n    df = session.read_parquet(path=path, schema=cls._schema)\n    return cls(_df=df, _schema=cls._schema)\n
"},{"location":"components/dataset/ld_index/#schema","title":"Schema","text":"
root\n |-- variantId: string (nullable = false)\n |-- chromosome: string (nullable = false)\n |-- position: integer (nullable = false)\n |-- referenceAllele: string (nullable = false)\n |-- alternateAllele: string (nullable = false)\n |-- idx: long (nullable = false)\n |-- start_idx: long (nullable = true)\n |-- stop_idx: long (nullable = true)\n
"},{"location":"components/dataset/study_locus_overlap/","title":"Study locus overlap","text":"

Bases: Dataset

Study-Locus overlap.

This dataset captures pairs of overlapping StudyLocus.

Source code in src/otg/dataset/study_locus_overlap.py
@dataclass\nclass StudyLocusOverlap(Dataset):\n\"\"\"Study-Locus overlap.\n\n    This dataset captures pairs of overlapping `StudyLocus`.\n    \"\"\"\n\n    _schema: StructType = parse_spark_schema(\"study_locus_overlap.json\")\n\n    @classmethod\n    def from_parquet(\n        cls: type[StudyLocusOverlap], session: Session, path: str\n    ) -> StudyLocusOverlap:\n\"\"\"Initialise StudyLocusOverlap from parquet file.\n\n        Args:\n            session (Session): ETL session\n            path (str): Path to parquet file\n\n        Returns:\n            StudyLocusOverlap: Study-locus overlap dataset\n        \"\"\"\n        df = session.read_parquet(path=path, schema=cls._schema)\n        return cls(_df=df, _schema=cls._schema)\n
"},{"location":"components/dataset/study_locus_overlap/#otg.dataset.study_locus_overlap.StudyLocusOverlap.from_parquet","title":"from_parquet(session, path) classmethod","text":"

Initialise StudyLocusOverlap from parquet file.

Parameters:

    session (Session): ETL session (required)
    path (str): Path to parquet file (required)

Returns:

    StudyLocusOverlap: Study-locus overlap dataset

Source code in src/otg/dataset/study_locus_overlap.py
@classmethod\ndef from_parquet(\n    cls: type[StudyLocusOverlap], session: Session, path: str\n) -> StudyLocusOverlap:\n\"\"\"Initialise StudyLocusOverlap from parquet file.\n\n    Args:\n        session (Session): ETL session\n        path (str): Path to parquet file\n\n    Returns:\n        StudyLocusOverlap: Study-locus overlap dataset\n    \"\"\"\n    df = session.read_parquet(path=path, schema=cls._schema)\n    return cls(_df=df, _schema=cls._schema)\n
"},{"location":"components/dataset/study_locus_overlap/#schema","title":"Schema","text":"
root\n |-- left_studyLocusId: long (nullable = false)\n |-- right_studyLocusId: long (nullable = false)\n |-- chromosome: string (nullable = false)\n |-- tagVariantId: string (nullable = false)\n |-- right_logABF: double (nullable = true)\n |-- left_logABF: double (nullable = true)\n |-- right_posteriorProbability: double (nullable = true)\n |-- left_posteriorProbability: double (nullable = true)\n
"},{"location":"components/dataset/summary_statistics/","title":"Summary statistics","text":"

Bases: Dataset

Summary Statistics dataset.

A summary statistics dataset contains all single point statistics resulting from a GWAS.

Source code in src/otg/dataset/summary_statistics.py
@dataclass\nclass SummaryStatistics(Dataset):\n\"\"\"Summary Statistics dataset.\n\n    A summary statistics dataset contains all single point statistics resulting from a GWAS.\n    \"\"\"\n\n    _schema: t.StructType = parse_spark_schema(\"summary_statistics.json\")\n\n    @classmethod\n    def from_parquet(\n        cls: type[SummaryStatistics], session: Session, path: str\n    ) -> SummaryStatistics:\n\"\"\"Initialise SummaryStatistics from parquet file.\n\n        Args:\n            session (Session): Session\n            path (str): Path to parquet file\n\n        Returns:\n            SummaryStatistics: SummaryStatistics dataset\n        \"\"\"\n        df = session.read_parquet(path=path, schema=cls._schema)\n        return cls(_df=df, _schema=cls._schema)\n\n    @classmethod\n    def from_gwas_harmonized_summary_stats(\n        cls: type[SummaryStatistics],\n        sumstats_df: DataFrame,\n        study_id: str,\n    ) -> SummaryStatistics:\n\"\"\"Create summary statistics object from summary statistics flatfile, harmonized by the GWAS Catalog.\n\n        Args:\n            sumstats_df (DataFrame): Harmonized dataset read as a spark dataframe from GWAS Catalog.\n            study_id (str): GWAS Catalog study accession.\n\n        Returns:\n            SummaryStatistics\n        \"\"\"\n        # The effect allele frequency is an optional column, we have to test if it is there:\n        allele_frequency_expression = (\n            f.col(\"hm_effect_allele_frequency\").cast(t.FloatType())\n            if \"hm_effect_allele_frequency\" in sumstats_df.columns\n            else f.lit(None)\n        )\n\n        # Processing columns of interest:\n        processed_sumstats_df = (\n            sumstats_df\n            # Dropping rows which doesn't have proper position:\n            .filter(f.col(\"hm_pos\").cast(t.IntegerType()).isNotNull())\n            .select(\n                # Adding study identifier:\n                f.lit(study_id).cast(t.StringType()).alias(\"studyId\"),\n                # Adding variant identifier:\n                f.col(\"hm_variant_id\").alias(\"variantId\"),\n                f.col(\"hm_chrom\").alias(\"chromosome\"),\n                f.col(\"hm_pos\").cast(t.IntegerType()).alias(\"position\"),\n                # Parsing p-value mantissa and exponent:\n                *parse_pvalue(f.col(\"p_value\")),\n                # Converting/calculating effect and confidence interval:\n                *convert_odds_ratio_to_beta(\n                    f.col(\"hm_beta\").cast(t.DoubleType()),\n                    f.col(\"hm_odds_ratio\").cast(t.DoubleType()),\n                    f.col(\"standard_error\").cast(t.DoubleType()),\n                ),\n                allele_frequency_expression.alias(\"effectAlleleFrequencyFromSource\"),\n            )\n            # The previous select expression generated the necessary fields for calculating the confidence intervals:\n            .select(\n                \"*\",\n                *calculate_confidence_interval(\n                    f.col(\"pValueMantissa\"),\n                    f.col(\"pValueExponent\"),\n                    f.col(\"beta\"),\n                    f.col(\"standardError\"),\n                ),\n            )\n            .repartition(200, \"chromosome\")\n            .sortWithinPartitions(\"position\")\n        )\n\n        # Initializing summary statistics object:\n        return cls(\n            _df=processed_sumstats_df,\n        )\n\n    def pvalue_filter(self: SummaryStatistics, pvalue: float) -> 
SummaryStatistics:\n\"\"\"Filter summary statistics based on the provided p-value threshold.\n\n        Args:\n            pvalue (float): upper limit of the p-value to be filtered upon.\n\n        Returns:\n            SummaryStatistics: summary statistics object containing single point associations with p-values at least as significant as the provided threshold.\n        \"\"\"\n        # Converting p-value to mantissa and exponent:\n        (mantissa, exponent) = split_pvalue(pvalue)\n\n        # Applying filter:\n        df = self._df.filter(\n            (f.col(\"pValueExponent\") < exponent)\n            | (\n                (f.col(\"pValueExponent\") == exponent)\n                & (f.col(\"pValueMantissa\") <= mantissa)\n            )\n        )\n        return SummaryStatistics(_df=df)\n\n    def window_based_clumping(self: SummaryStatistics, distance: int) -> StudyLocus:\n\"\"\"Perform distance-based clumping.\n\n        Args:\n            distance (int): Distance in base pairs\n\n        Returns:\n            StudyLocus: StudyLocus object\n        \"\"\"\n        # Calculate distance-based clumping:\n        return WindowBasedClumping.clump(self, distance)\n
"},{"location":"components/dataset/summary_statistics/#otg.dataset.summary_statistics.SummaryStatistics.from_gwas_harmonized_summary_stats","title":"from_gwas_harmonized_summary_stats(sumstats_df, study_id) classmethod","text":"

Create summary statistics object from summary statistics flatfile, harmonized by the GWAS Catalog.

Parameters:

    sumstats_df (DataFrame): Harmonized dataset read as a spark dataframe from GWAS Catalog (required)
    study_id (str): GWAS Catalog study accession (required)

Returns:

    SummaryStatistics: Summary statistics dataset

Source code in src/otg/dataset/summary_statistics.py
@classmethod\ndef from_gwas_harmonized_summary_stats(\n    cls: type[SummaryStatistics],\n    sumstats_df: DataFrame,\n    study_id: str,\n) -> SummaryStatistics:\n\"\"\"Create summary statistics object from summary statistics flatfile, harmonized by the GWAS Catalog.\n\n    Args:\n        sumstats_df (DataFrame): Harmonized dataset read as a spark dataframe from GWAS Catalog.\n        study_id (str): GWAS Catalog study accession.\n\n    Returns:\n        SummaryStatistics\n    \"\"\"\n    # The effect allele frequency is an optional column, we have to test if it is there:\n    allele_frequency_expression = (\n        f.col(\"hm_effect_allele_frequency\").cast(t.FloatType())\n        if \"hm_effect_allele_frequency\" in sumstats_df.columns\n        else f.lit(None)\n    )\n\n    # Processing columns of interest:\n    processed_sumstats_df = (\n        sumstats_df\n        # Dropping rows which doesn't have proper position:\n        .filter(f.col(\"hm_pos\").cast(t.IntegerType()).isNotNull())\n        .select(\n            # Adding study identifier:\n            f.lit(study_id).cast(t.StringType()).alias(\"studyId\"),\n            # Adding variant identifier:\n            f.col(\"hm_variant_id\").alias(\"variantId\"),\n            f.col(\"hm_chrom\").alias(\"chromosome\"),\n            f.col(\"hm_pos\").cast(t.IntegerType()).alias(\"position\"),\n            # Parsing p-value mantissa and exponent:\n            *parse_pvalue(f.col(\"p_value\")),\n            # Converting/calculating effect and confidence interval:\n            *convert_odds_ratio_to_beta(\n                f.col(\"hm_beta\").cast(t.DoubleType()),\n                f.col(\"hm_odds_ratio\").cast(t.DoubleType()),\n                f.col(\"standard_error\").cast(t.DoubleType()),\n            ),\n            allele_frequency_expression.alias(\"effectAlleleFrequencyFromSource\"),\n        )\n        # The previous select expression generated the necessary fields for calculating the confidence intervals:\n        .select(\n            \"*\",\n            *calculate_confidence_interval(\n                f.col(\"pValueMantissa\"),\n                f.col(\"pValueExponent\"),\n                f.col(\"beta\"),\n                f.col(\"standardError\"),\n            ),\n        )\n        .repartition(200, \"chromosome\")\n        .sortWithinPartitions(\"position\")\n    )\n\n    # Initializing summary statistics object:\n    return cls(\n        _df=processed_sumstats_df,\n    )\n
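A sketch of feeding a harmonized GWAS Catalog file into this constructor. The file location, separator, and study accession below are illustrative; only the session.spark.read call pattern is taken from the documented code.

# Hypothetical ingestion sketch; the file path, separator and accession are placeholders.
from otg.dataset.summary_statistics import SummaryStatistics

def ingest_study(session):
    raw = session.spark.read.csv(
        "gs://my-bucket/harmonized/GCST012345.h.tsv.gz",  # placeholder harmonized sumstats file
        sep="\t",
        header=True,
    )
    return SummaryStatistics.from_gwas_harmonized_summary_stats(raw, study_id="GCST012345")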
"},{"location":"components/dataset/summary_statistics/#otg.dataset.summary_statistics.SummaryStatistics.from_parquet","title":"from_parquet(session, path) classmethod","text":"

Initialise SummaryStatistics from parquet file.

Parameters:

    session (Session): Session (required)
    path (str): Path to parquet file (required)

Returns:

    SummaryStatistics: SummaryStatistics dataset

Source code in src/otg/dataset/summary_statistics.py
@classmethod\ndef from_parquet(\n    cls: type[SummaryStatistics], session: Session, path: str\n) -> SummaryStatistics:\n\"\"\"Initialise SummaryStatistics from parquet file.\n\n    Args:\n        session (Session): Session\n        path (str): Path to parquet file\n\n    Returns:\n        SummaryStatistics: SummaryStatistics dataset\n    \"\"\"\n    df = session.read_parquet(path=path, schema=cls._schema)\n    return cls(_df=df, _schema=cls._schema)\n
"},{"location":"components/dataset/summary_statistics/#otg.dataset.summary_statistics.SummaryStatistics.pvalue_filter","title":"pvalue_filter(pvalue)","text":"

Filter summary statistics based on the provided p-value threshold.

Parameters:

    pvalue (float): upper limit of the p-value to be filtered upon (required)

Returns:

    SummaryStatistics: summary statistics object containing single point associations with p-values at least as significant as the provided threshold

Source code in src/otg/dataset/summary_statistics.py
def pvalue_filter(self: SummaryStatistics, pvalue: float) -> SummaryStatistics:\n\"\"\"Filter summary statistics based on the provided p-value threshold.\n\n    Args:\n        pvalue (float): upper limit of the p-value to be filtered upon.\n\n    Returns:\n        SummaryStatistics: summary statistics object containing single point associations with p-values at least as significant as the provided threshold.\n    \"\"\"\n    # Converting p-value to mantissa and exponent:\n    (mantissa, exponent) = split_pvalue(pvalue)\n\n    # Applying filter:\n    df = self._df.filter(\n        (f.col(\"pValueExponent\") < exponent)\n        | (\n            (f.col(\"pValueExponent\") == exponent)\n            & (f.col(\"pValueMantissa\") <= mantissa)\n        )\n    )\n    return SummaryStatistics(_df=df)\n
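Because p-values are stored as mantissa/exponent pairs, the threshold is split the same way before comparing: 5e-8 becomes mantissa 5 and exponent -8, and a row passes if its exponent is smaller, or equal with a mantissa of at most 5. A plain-Python sketch of that comparison; the splitting function below mirrors the logic but is not the library's split_pvalue helper.

# Plain-Python illustration of the mantissa/exponent comparison used by pvalue_filter.
import math

def split_pvalue_sketch(pvalue: float) -> tuple:
    """Split a p-value into (mantissa, exponent); e.g. 5e-8 -> (5.0, -8)."""
    exponent = math.floor(math.log10(pvalue))
    return pvalue / 10**exponent, exponent

def passes(p_mantissa: float, p_exponent: int, threshold: float = 5e-8) -> bool:
    t_mantissa, t_exponent = split_pvalue_sketch(threshold)
    return p_exponent < t_exponent or (p_exponent == t_exponent and p_mantissa <= t_mantissa)

print(passes(3.2, -9))  # True: 3.2e-9 is more significant than 5e-8
print(passes(6.0, -8))  # False: 6e-8 is less significant than 5e-8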
"},{"location":"components/dataset/summary_statistics/#otg.dataset.summary_statistics.SummaryStatistics.window_based_clumping","title":"window_based_clumping(distance)","text":"

Perform distance-based clumping.

Parameters:

    distance (int): Distance in base pairs (required)

Returns:

    StudyLocus: StudyLocus object

Source code in src/otg/dataset/summary_statistics.py
def window_based_clumping(self: SummaryStatistics, distance: int) -> StudyLocus:\n\"\"\"Perform distance-based clumping.\n\n    Args:\n        distance (int): Distance in base pairs\n\n    Returns:\n        StudyLocus: StudyLocus object\n    \"\"\"\n    # Calculate distance-based clumping:\n    return WindowBasedClumping.clump(self, distance)\n
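A short usage sketch chaining the filtering and clumping methods documented above. The 5e-8 threshold and 500 kb window are conventional GWAS choices used purely as an illustration, and the parquet path is a placeholder.

# Hypothetical chaining of pvalue_filter and window_based_clumping; values are illustrative.
from otg.dataset.summary_statistics import SummaryStatistics

def clump_study(session, path: str = "gs://my-bucket/summary_statistics"):  # placeholder path
    summary_stats = SummaryStatistics.from_parquet(session, path)
    significant = summary_stats.pvalue_filter(5e-8)           # keep genome-wide significant hits
    return significant.window_based_clumping(500_000)         # clump within a 500 kb window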
"},{"location":"components/dataset/summary_statistics/#schema","title":"Schema","text":"
root\n |-- studyId: string (nullable = false)\n |-- variantId: string (nullable = false)\n |-- chromosome: string (nullable = false)\n |-- position: integer (nullable = false)\n |-- beta: double (nullable = false)\n |-- betaConfidenceIntervalLower: double (nullable = true)\n |-- betaConfidenceIntervalUpper: double (nullable = true)\n |-- pValueMantissa: float (nullable = false)\n |-- pValueExponent: integer (nullable = false)\n |-- effectAlleleFrequencyFromSource: float (nullable = true)\n |-- standardError: double (nullable = true)\n
"},{"location":"components/dataset/variant_annotation/","title":"Variant annotation","text":"

Bases: Dataset

Dataset with variant-level annotations derived from gnomAD.

Source code in src/otg/dataset/variant_annotation.py
@dataclass\nclass VariantAnnotation(Dataset):\n\"\"\"Dataset with variant-level annotations derived from GnomAD.\"\"\"\n\n    _schema: StructType = parse_spark_schema(\"variant_annotation.json\")\n\n    @classmethod\n    def from_parquet(\n        cls: type[VariantAnnotation], session: Session, path: str\n    ) -> VariantAnnotation:\n\"\"\"Initialise VariantAnnotation from parquet file.\n\n        Args:\n            session (Session): ETL session\n            path (str): Path to parquet file\n\n        Returns:\n            VariantAnnotation: VariantAnnotation dataset\n        \"\"\"\n        df = session.read_parquet(path=path, schema=cls._schema)\n        return cls(_df=df, _schema=cls._schema)\n\n    @classmethod\n    def from_gnomad(\n        cls: type[VariantAnnotation],\n        gnomad_file: str,\n        grch38_to_grch37_chain: str,\n        populations: list,\n    ) -> VariantAnnotation:\n\"\"\"Generate variant annotation dataset from gnomAD.\n\n        Some relevant modifications to the original dataset are:\n\n        1. The transcript consequences features provided by VEP are filtered to only refer to the Ensembl canonical transcript.\n        2. Genome coordinates are liftovered from GRCh38 to GRCh37 to keep as annotation.\n        3. Field names are converted to camel case to follow the convention.\n\n        Args:\n            gnomad_file (str): Path to `gnomad.genomes.vX.X.X.sites.ht` gnomAD dataset\n            grch38_to_grch37_chain (str): Path to chain file for liftover\n            populations (list): List of populations to include in the dataset\n\n        Returns:\n            VariantAnnotation: Variant annotation dataset\n        \"\"\"\n        # Load variants dataset\n        ht = hl.read_table(\n            gnomad_file,\n            _load_refs=False,\n        )\n\n        # Liftover\n        grch37 = hl.get_reference(\"GRCh37\")\n        grch38 = hl.get_reference(\"GRCh38\")\n        grch38.add_liftover(grch38_to_grch37_chain, grch37)\n\n        # Drop non biallelic variants\n        ht = ht.filter(ht.alleles.length() == 2)\n        # Liftover\n        ht = ht.annotate(locus_GRCh37=hl.liftover(ht.locus, \"GRCh37\"))\n        # Select relevant fields and nested records to create class\n        return cls(\n            _df=(\n                ht.select(\n                    gnomad3VariantId=hl.str(\"-\").join(\n                        [\n                            ht.locus.contig.replace(\"chr\", \"\"),\n                            hl.str(ht.locus.position),\n                            ht.alleles[0],\n                            ht.alleles[1],\n                        ]\n                    ),\n                    chromosome=ht.locus.contig.replace(\"chr\", \"\"),\n                    position=convert_gnomad_position_to_ensembl_hail(\n                        ht.locus.position, ht.alleles[0], ht.alleles[1]\n                    ),\n                    variantId=hl.str(\"_\").join(\n                        [\n                            ht.locus.contig.replace(\"chr\", \"\"),\n                            hl.str(\n                                convert_gnomad_position_to_ensembl_hail(\n                                    ht.locus.position, ht.alleles[0], ht.alleles[1]\n                                )\n                            ),\n                            ht.alleles[0],\n                            ht.alleles[1],\n                        ]\n                    ),\n                    chromosomeB37=ht.locus_GRCh37.contig.replace(\"chr\", \"\"),\n                  
  positionB37=ht.locus_GRCh37.position,\n                    referenceAllele=ht.alleles[0],\n                    alternateAllele=ht.alleles[1],\n                    rsIds=ht.rsid,\n                    alleleType=ht.allele_info.allele_type,\n                    cadd=hl.struct(\n                        phred=ht.cadd.phred,\n                        raw=ht.cadd.raw_score,\n                    ),\n                    alleleFrequencies=hl.set([f\"{pop}-adj\" for pop in populations]).map(\n                        lambda p: hl.struct(\n                            populationName=p,\n                            alleleFrequency=ht.freq[ht.globals.freq_index_dict[p]].AF,\n                        )\n                    ),\n                    vep=hl.struct(\n                        mostSevereConsequence=ht.vep.most_severe_consequence,\n                        transcriptConsequences=hl.map(\n                            lambda x: hl.struct(\n                                aminoAcids=x.amino_acids,\n                                consequenceTerms=x.consequence_terms,\n                                geneId=x.gene_id,\n                                lof=x.lof,\n                                polyphenScore=x.polyphen_score,\n                                polyphenPrediction=x.polyphen_prediction,\n                                siftScore=x.sift_score,\n                                siftPrediction=x.sift_prediction,\n                            ),\n                            # Only keeping canonical transcripts\n                            ht.vep.transcript_consequences.filter(\n                                lambda x: (x.canonical == 1)\n                                & (x.gene_symbol_source == \"HGNC\")\n                            ),\n                        ),\n                    ),\n                )\n                .key_by(\"chromosome\", \"position\")\n                .drop(\"locus\", \"alleles\")\n                .select_globals()\n                .to_spark(flatten=False)\n            )\n        )\n\n    def persist(self: VariantAnnotation) -> VariantAnnotation:\n\"\"\"Persist DataFrame included in the Dataset.\"\"\"\n        self.df = self._df.persist()\n        return self\n\n    def max_maf(self: VariantAnnotation) -> Column:\n\"\"\"Maximum minor allele frequency accross all populations.\n\n        Returns:\n            Column: Maximum minor allele frequency accross all populations.\n        \"\"\"\n        return f.array_max(\n            f.transform(\n                self.df.alleleFrequencies,\n                lambda af: f.when(\n                    af.alleleFrequency > 0.5, 1 - af.alleleFrequency\n                ).otherwise(af.alleleFrequency),\n            )\n        )\n\n    def filter_by_variant_df(\n        self: VariantAnnotation, df: DataFrame, cols: list[str]\n    ) -> VariantAnnotation:\n\"\"\"Filter variant annotation dataset by a variant dataframe.\n\n        Args:\n            df (DataFrame): A dataframe of variants\n            cols (List[str]): A list of columns to join on\n\n        Returns:\n            VariantAnnotation: A filtered variant annotation dataset\n        \"\"\"\n        self.df = self._df.join(f.broadcast(df.select(cols)), on=cols, how=\"inner\")\n        return self\n\n    def get_transcript_consequence_df(\n        self: VariantAnnotation, filter_by: Optional[GeneIndex] = None\n    ) -> DataFrame:\n\"\"\"Dataframe of exploded transcript consequences.\n\n        Optionally the trancript consequences can be reduced to the universe of a gene index.\n\n  
      Args:\n            filter_by (GeneIndex): A gene index. Defaults to None.\n\n        Returns:\n            DataFrame: A dataframe exploded by transcript consequences with the columns variantId, chromosome, transcriptConsequence\n        \"\"\"\n        # exploding the array removes records without VEP annotation\n        transript_consequences = self.df.withColumn(\n            \"transcriptConsequence\", f.explode(\"vep.transcriptConsequences\")\n        ).select(\n            \"variantId\",\n            \"chromosome\",\n            \"position\",\n            \"transcriptConsequence\",\n            f.col(\"transcriptConsequence.geneId\").alias(\"geneId\"),\n        )\n        if filter_by:\n            transript_consequences = transript_consequences.join(\n                f.broadcast(filter_by.df),\n                on=[\"chromosome\", \"geneId\"],\n            )\n        return transript_consequences.persist()\n\n    def get_most_severe_vep_v2g(\n        self: VariantAnnotation,\n        vep_consequences: DataFrame,\n        filter_by: GeneIndex,\n    ) -> V2G:\n\"\"\"Creates a dataset with variant to gene assignments based on VEP's predicted consequence on the transcript.\n\n        Optionally the trancript consequences can be reduced to the universe of a gene index.\n\n        Args:\n            vep_consequences (DataFrame): A dataframe of VEP consequences\n            filter_by (GeneIndex): A gene index to filter by. Defaults to None.\n\n        Returns:\n            V2G: High and medium severity variant to gene assignments\n        \"\"\"\n        vep_lut = vep_consequences.select(\n            f.element_at(f.split(\"Accession\", r\"/\"), -1).alias(\n                \"variantFunctionalConsequenceId\"\n            ),\n            f.col(\"Term\").alias(\"label\"),\n            f.col(\"v2g_score\").cast(\"double\").alias(\"score\"),\n        )\n\n        return V2G(\n            _df=self.get_transcript_consequence_df(filter_by)\n            .select(\n                \"variantId\",\n                \"chromosome\",\n                \"position\",\n                f.col(\"transcriptConsequence.geneId\").alias(\"geneId\"),\n                f.explode(\"transcriptConsequence.consequenceTerms\").alias(\"label\"),\n                f.lit(\"vep\").alias(\"datatypeId\"),\n                f.lit(\"variantConsequence\").alias(\"datasourceId\"),\n            )\n            # A variant can have multiple predicted consequences on a transcript, the most severe one is selected\n            .join(\n                f.broadcast(vep_lut),\n                on=\"label\",\n                how=\"inner\",\n            )\n            .filter(f.col(\"score\") != 0)\n            .transform(\n                lambda df: get_record_with_maximum_value(\n                    df, [\"variantId\", \"geneId\"], \"score\"\n                )\n            )\n        )\n\n    def get_polyphen_v2g(\n        self: VariantAnnotation, filter_by: Optional[GeneIndex] = None\n    ) -> V2G:\n\"\"\"Creates a dataset with variant to gene assignments with a PolyPhen's predicted score on the transcript.\n\n        Polyphen informs about the probability that a substitution is damaging. Optionally the trancript consequences can be reduced to the universe of a gene index.\n\n        Args:\n            filter_by (GeneIndex): A gene index to filter by. 
Defaults to None.\n\n        Returns:\n            V2G: variant to gene assignments with their polyphen scores\n        \"\"\"\n        return V2G(\n            _df=self.get_transcript_consequence_df(filter_by)\n            .filter(f.col(\"transcriptConsequence.polyphenScore\").isNotNull())\n            .select(\n                \"variantId\",\n                \"chromosome\",\n                \"position\",\n                \"geneId\",\n                f.col(\"transcriptConsequence.polyphenScore\").alias(\"score\"),\n                f.col(\"transcriptConsequence.polyphenPrediction\").alias(\"label\"),\n                f.lit(\"vep\").alias(\"datatypeId\"),\n                f.lit(\"polyphen\").alias(\"datasourceId\"),\n            )\n        )\n\n    def get_sift_v2g(self: VariantAnnotation, filter_by: GeneIndex) -> V2G:\n\"\"\"Creates a dataset with variant to gene assignments with a SIFT's predicted score on the transcript.\n\n        SIFT informs about the probability that a substitution is tolerated so scores nearer zero are more likely to be deleterious.\n        Optionally the trancript consequences can be reduced to the universe of a gene index.\n\n        Args:\n            filter_by (GeneIndex): A gene index to filter by.\n\n        Returns:\n            V2G: variant to gene assignments with their SIFT scores\n        \"\"\"\n        return V2G(\n            _df=self.get_transcript_consequence_df(filter_by)\n            .filter(f.col(\"transcriptConsequence.siftScore\").isNotNull())\n            .select(\n                \"variantId\",\n                \"chromosome\",\n                \"position\",\n                \"geneId\",\n                f.expr(\"1 - transcriptConsequence.siftScore\").alias(\"score\"),\n                f.col(\"transcriptConsequence.siftPrediction\").alias(\"label\"),\n                f.lit(\"vep\").alias(\"datatypeId\"),\n                f.lit(\"sift\").alias(\"datasourceId\"),\n            )\n        )\n\n    def get_plof_v2g(self: VariantAnnotation, filter_by: GeneIndex) -> V2G:\n\"\"\"Creates a dataset with variant to gene assignments with a flag indicating if the variant is predicted to be a loss-of-function variant by the LOFTEE algorithm.\n\n        Optionally the trancript consequences can be reduced to the universe of a gene index.\n\n        Args:\n            filter_by (GeneIndex): A gene index to filter by.\n\n        Returns:\n            V2G: variant to gene assignments from the LOFTEE algorithm\n        \"\"\"\n        return V2G(\n            _df=self.get_transcript_consequence_df(filter_by)\n            .filter(f.col(\"transcriptConsequence.lof\").isNotNull())\n            .withColumn(\n                \"isHighQualityPlof\",\n                f.when(f.col(\"transcriptConsequence.lof\") == \"HC\", True).when(\n                    f.col(\"transcriptConsequence.lof\") == \"LC\", False\n                ),\n            )\n            .withColumn(\n                \"score\",\n                f.when(f.col(\"isHighQualityPlof\"), 1.0).when(\n                    ~f.col(\"isHighQualityPlof\"), 0\n                ),\n            )\n            .select(\n                \"variantId\",\n                \"chromosome\",\n                \"position\",\n                \"geneId\",\n                \"isHighQualityPlof\",\n                f.col(\"score\"),\n                f.lit(\"vep\").alias(\"datatypeId\"),\n                f.lit(\"loftee\").alias(\"datasourceId\"),\n            )\n        )\n\n    def get_distance_to_tss(\n        self: VariantAnnotation,\n     
   filter_by: GeneIndex,\n        max_distance: int = 500_000,\n    ) -> V2G:\n\"\"\"Extracts variant to gene assignments for variants falling within a window of a gene's TSS.\n\n        Args:\n            filter_by (GeneIndex): A gene index to filter by.\n            max_distance (int): The maximum distance from the TSS to consider. Defaults to 500_000.\n\n        Returns:\n            V2G: variant to gene assignments with their distance to the TSS\n        \"\"\"\n        return V2G(\n            _df=self.df.alias(\"variant\")\n            .join(\n                f.broadcast(filter_by.locations_lut()).alias(\"gene\"),\n                on=[\n                    f.col(\"variant.chromosome\") == f.col(\"gene.chromosome\"),\n                    f.abs(f.col(\"variant.position\") - f.col(\"gene.tss\"))\n                    <= max_distance,\n                ],\n                how=\"inner\",\n            )\n            .withColumn(\n                \"inverse_distance\",\n                max_distance - f.abs(f.col(\"variant.position\") - f.col(\"gene.tss\")),\n            )\n            .transform(lambda df: normalise_column(df, \"inverse_distance\", \"score\"))\n            .select(\n                \"variantId\",\n                f.col(\"variant.chromosome\").alias(\"chromosome\"),\n                \"position\",\n                \"geneId\",\n                \"score\",\n                f.lit(\"distance\").alias(\"datatypeId\"),\n                f.lit(\"canonical_tss\").alias(\"datasourceId\"),\n            )\n        )\n
"},{"location":"components/dataset/variant_annotation/#otg.dataset.variant_annotation.VariantAnnotation.filter_by_variant_df","title":"filter_by_variant_df(df, cols)","text":"

Filter variant annotation dataset by a variant dataframe.

Parameters:

    df (DataFrame): A dataframe of variants. Required.
    cols (List[str]): A list of columns to join on. Required.

Returns:

    VariantAnnotation: A filtered variant annotation dataset

Source code in src/otg/dataset/variant_annotation.py
def filter_by_variant_df(\n    self: VariantAnnotation, df: DataFrame, cols: list[str]\n) -> VariantAnnotation:\n\"\"\"Filter variant annotation dataset by a variant dataframe.\n\n    Args:\n        df (DataFrame): A dataframe of variants\n        cols (List[str]): A list of columns to join on\n\n    Returns:\n        VariantAnnotation: A filtered variant annotation dataset\n    \"\"\"\n    self.df = self._df.join(f.broadcast(df.select(cols)), on=cols, how=\"inner\")\n    return self\n
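A usage sketch, assuming `variant_annotation` is a `VariantAnnotation` instance and `my_variants_df` is a Spark DataFrame of variants; the join columns below are placeholders and must exist in both datasets:

```python
# Keep only annotation rows that match the variants of interest:
va_subset = variant_annotation.filter_by_variant_df(
    df=my_variants_df,
    cols=["chromosome", "position"],
)
```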
"},{"location":"components/dataset/variant_annotation/#otg.dataset.variant_annotation.VariantAnnotation.from_gnomad","title":"from_gnomad(gnomad_file, grch38_to_grch37_chain, populations) classmethod","text":"

Generate variant annotation dataset from gnomAD.

Some relevant modifications to the original dataset are:

  1. The transcript consequence features provided by VEP are filtered to refer only to the Ensembl canonical transcript.
  2. Genome coordinates are lifted over from GRCh38 to GRCh37 and kept as annotation.
  3. Field names are converted to camel case to follow the convention.

Parameters:

    gnomad_file (str): Path to gnomad.genomes.vX.X.X.sites.ht gnomAD dataset. Required.
    grch38_to_grch37_chain (str): Path to chain file for liftover. Required.
    populations (list): List of populations to include in the dataset. Required.

Returns:

    VariantAnnotation: Variant annotation dataset

Source code in src/otg/dataset/variant_annotation.py
@classmethod\ndef from_gnomad(\n    cls: type[VariantAnnotation],\n    gnomad_file: str,\n    grch38_to_grch37_chain: str,\n    populations: list,\n) -> VariantAnnotation:\n\"\"\"Generate variant annotation dataset from gnomAD.\n\n    Some relevant modifications to the original dataset are:\n\n    1. The transcript consequences features provided by VEP are filtered to only refer to the Ensembl canonical transcript.\n    2. Genome coordinates are liftovered from GRCh38 to GRCh37 to keep as annotation.\n    3. Field names are converted to camel case to follow the convention.\n\n    Args:\n        gnomad_file (str): Path to `gnomad.genomes.vX.X.X.sites.ht` gnomAD dataset\n        grch38_to_grch37_chain (str): Path to chain file for liftover\n        populations (list): List of populations to include in the dataset\n\n    Returns:\n        VariantAnnotation: Variant annotation dataset\n    \"\"\"\n    # Load variants dataset\n    ht = hl.read_table(\n        gnomad_file,\n        _load_refs=False,\n    )\n\n    # Liftover\n    grch37 = hl.get_reference(\"GRCh37\")\n    grch38 = hl.get_reference(\"GRCh38\")\n    grch38.add_liftover(grch38_to_grch37_chain, grch37)\n\n    # Drop non biallelic variants\n    ht = ht.filter(ht.alleles.length() == 2)\n    # Liftover\n    ht = ht.annotate(locus_GRCh37=hl.liftover(ht.locus, \"GRCh37\"))\n    # Select relevant fields and nested records to create class\n    return cls(\n        _df=(\n            ht.select(\n                gnomad3VariantId=hl.str(\"-\").join(\n                    [\n                        ht.locus.contig.replace(\"chr\", \"\"),\n                        hl.str(ht.locus.position),\n                        ht.alleles[0],\n                        ht.alleles[1],\n                    ]\n                ),\n                chromosome=ht.locus.contig.replace(\"chr\", \"\"),\n                position=convert_gnomad_position_to_ensembl_hail(\n                    ht.locus.position, ht.alleles[0], ht.alleles[1]\n                ),\n                variantId=hl.str(\"_\").join(\n                    [\n                        ht.locus.contig.replace(\"chr\", \"\"),\n                        hl.str(\n                            convert_gnomad_position_to_ensembl_hail(\n                                ht.locus.position, ht.alleles[0], ht.alleles[1]\n                            )\n                        ),\n                        ht.alleles[0],\n                        ht.alleles[1],\n                    ]\n                ),\n                chromosomeB37=ht.locus_GRCh37.contig.replace(\"chr\", \"\"),\n                positionB37=ht.locus_GRCh37.position,\n                referenceAllele=ht.alleles[0],\n                alternateAllele=ht.alleles[1],\n                rsIds=ht.rsid,\n                alleleType=ht.allele_info.allele_type,\n                cadd=hl.struct(\n                    phred=ht.cadd.phred,\n                    raw=ht.cadd.raw_score,\n                ),\n                alleleFrequencies=hl.set([f\"{pop}-adj\" for pop in populations]).map(\n                    lambda p: hl.struct(\n                        populationName=p,\n                        alleleFrequency=ht.freq[ht.globals.freq_index_dict[p]].AF,\n                    )\n                ),\n                vep=hl.struct(\n                    mostSevereConsequence=ht.vep.most_severe_consequence,\n                    transcriptConsequences=hl.map(\n                        lambda x: hl.struct(\n                            aminoAcids=x.amino_acids,\n                            
consequenceTerms=x.consequence_terms,\n                            geneId=x.gene_id,\n                            lof=x.lof,\n                            polyphenScore=x.polyphen_score,\n                            polyphenPrediction=x.polyphen_prediction,\n                            siftScore=x.sift_score,\n                            siftPrediction=x.sift_prediction,\n                        ),\n                        # Only keeping canonical transcripts\n                        ht.vep.transcript_consequences.filter(\n                            lambda x: (x.canonical == 1)\n                            & (x.gene_symbol_source == \"HGNC\")\n                        ),\n                    ),\n                ),\n            )\n            .key_by(\"chromosome\", \"position\")\n            .drop(\"locus\", \"alleles\")\n            .select_globals()\n            .to_spark(flatten=False)\n        )\n    )\n
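A hedged invocation sketch; the gnomAD Hail table path, chain file location and population codes below are placeholders, not values prescribed by the pipeline:

```python
from otg.dataset.variant_annotation import VariantAnnotation

variant_annotation = VariantAnnotation.from_gnomad(
    gnomad_file="gnomad.genomes.v3.1.1.sites.ht",              # placeholder Hail table path
    grch38_to_grch37_chain="grch38_to_grch37.over.chain.gz",   # placeholder chain file
    populations=["afr", "eas", "nfe"],                          # placeholder population codes
)
```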
"},{"location":"components/dataset/variant_annotation/#otg.dataset.variant_annotation.VariantAnnotation.from_parquet","title":"from_parquet(session, path) classmethod","text":"

Initialise VariantAnnotation from parquet file.

Parameters:

    session (Session): ETL session. Required.
    path (str): Path to parquet file. Required.

Returns:

    VariantAnnotation: VariantAnnotation dataset

Source code in src/otg/dataset/variant_annotation.py
@classmethod\ndef from_parquet(\n    cls: type[VariantAnnotation], session: Session, path: str\n) -> VariantAnnotation:\n\"\"\"Initialise VariantAnnotation from parquet file.\n\n    Args:\n        session (Session): ETL session\n        path (str): Path to parquet file\n\n    Returns:\n        VariantAnnotation: VariantAnnotation dataset\n    \"\"\"\n    df = session.read_parquet(path=path, schema=cls._schema)\n    return cls(_df=df, _schema=cls._schema)\n
"},{"location":"components/dataset/variant_annotation/#otg.dataset.variant_annotation.VariantAnnotation.get_distance_to_tss","title":"get_distance_to_tss(filter_by, max_distance=500000)","text":"

Extracts variant to gene assignments for variants falling within a window of a gene's TSS.

Parameters:

    filter_by (GeneIndex): A gene index to filter by. Required.
    max_distance (int): The maximum distance from the TSS to consider. Default: 500000.

Returns:

    V2G: variant to gene assignments with their distance to the TSS

Source code in src/otg/dataset/variant_annotation.py
def get_distance_to_tss(\n    self: VariantAnnotation,\n    filter_by: GeneIndex,\n    max_distance: int = 500_000,\n) -> V2G:\n\"\"\"Extracts variant to gene assignments for variants falling within a window of a gene's TSS.\n\n    Args:\n        filter_by (GeneIndex): A gene index to filter by.\n        max_distance (int): The maximum distance from the TSS to consider. Defaults to 500_000.\n\n    Returns:\n        V2G: variant to gene assignments with their distance to the TSS\n    \"\"\"\n    return V2G(\n        _df=self.df.alias(\"variant\")\n        .join(\n            f.broadcast(filter_by.locations_lut()).alias(\"gene\"),\n            on=[\n                f.col(\"variant.chromosome\") == f.col(\"gene.chromosome\"),\n                f.abs(f.col(\"variant.position\") - f.col(\"gene.tss\"))\n                <= max_distance,\n            ],\n            how=\"inner\",\n        )\n        .withColumn(\n            \"inverse_distance\",\n            max_distance - f.abs(f.col(\"variant.position\") - f.col(\"gene.tss\")),\n        )\n        .transform(lambda df: normalise_column(df, \"inverse_distance\", \"score\"))\n        .select(\n            \"variantId\",\n            f.col(\"variant.chromosome\").alias(\"chromosome\"),\n            \"position\",\n            \"geneId\",\n            \"score\",\n            f.lit(\"distance\").alias(\"datatypeId\"),\n            f.lit(\"canonical_tss\").alias(\"datasourceId\"),\n        )\n    )\n
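The score is a simple linear transform: `inverse_distance = max_distance - |variant.position - gene.tss|`, subsequently rescaled by `normalise_column`, so variants at the TSS receive the highest score. A usage sketch, assuming `variant_annotation` from the earlier example and a `GeneIndex` instance `gene_index` (the window size is illustrative):

```python
# Variant-to-gene evidence from proximity to the canonical TSS:
distance_v2g = variant_annotation.get_distance_to_tss(
    filter_by=gene_index,
    max_distance=250_000,
)
```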
"},{"location":"components/dataset/variant_annotation/#otg.dataset.variant_annotation.VariantAnnotation.get_most_severe_vep_v2g","title":"get_most_severe_vep_v2g(vep_consequences, filter_by)","text":"

Creates a dataset with variant to gene assignments based on VEP's predicted consequence on the transcript.

Optionally, the transcript consequences can be reduced to the universe of a gene index.

Parameters:

    vep_consequences (DataFrame): A dataframe of VEP consequences. Required.
    filter_by (GeneIndex): A gene index to filter by. Required.

Returns:

    V2G: High and medium severity variant to gene assignments

Source code in src/otg/dataset/variant_annotation.py
def get_most_severe_vep_v2g(\n    self: VariantAnnotation,\n    vep_consequences: DataFrame,\n    filter_by: GeneIndex,\n) -> V2G:\n\"\"\"Creates a dataset with variant to gene assignments based on VEP's predicted consequence on the transcript.\n\n    Optionally the trancript consequences can be reduced to the universe of a gene index.\n\n    Args:\n        vep_consequences (DataFrame): A dataframe of VEP consequences\n        filter_by (GeneIndex): A gene index to filter by. Defaults to None.\n\n    Returns:\n        V2G: High and medium severity variant to gene assignments\n    \"\"\"\n    vep_lut = vep_consequences.select(\n        f.element_at(f.split(\"Accession\", r\"/\"), -1).alias(\n            \"variantFunctionalConsequenceId\"\n        ),\n        f.col(\"Term\").alias(\"label\"),\n        f.col(\"v2g_score\").cast(\"double\").alias(\"score\"),\n    )\n\n    return V2G(\n        _df=self.get_transcript_consequence_df(filter_by)\n        .select(\n            \"variantId\",\n            \"chromosome\",\n            \"position\",\n            f.col(\"transcriptConsequence.geneId\").alias(\"geneId\"),\n            f.explode(\"transcriptConsequence.consequenceTerms\").alias(\"label\"),\n            f.lit(\"vep\").alias(\"datatypeId\"),\n            f.lit(\"variantConsequence\").alias(\"datasourceId\"),\n        )\n        # A variant can have multiple predicted consequences on a transcript, the most severe one is selected\n        .join(\n            f.broadcast(vep_lut),\n            on=\"label\",\n            how=\"inner\",\n        )\n        .filter(f.col(\"score\") != 0)\n        .transform(\n            lambda df: get_record_with_maximum_value(\n                df, [\"variantId\", \"geneId\"], \"score\"\n            )\n        )\n    )\n
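A usage sketch, continuing the assumptions above; the VEP consequence lookup file is a placeholder, but it must expose the `Accession`, `Term` and `v2g_score` columns that the method selects:

```python
# Placeholder path for the VEP consequence-to-score lookup table:
vep_consequences_df = spark.read.csv("vep_consequences.tsv", sep="\t", header=True)

vep_v2g = variant_annotation.get_most_severe_vep_v2g(
    vep_consequences=vep_consequences_df,
    filter_by=gene_index,
)
```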
"},{"location":"components/dataset/variant_annotation/#otg.dataset.variant_annotation.VariantAnnotation.get_plof_v2g","title":"get_plof_v2g(filter_by)","text":"

Creates a dataset with variant to gene assignments with a flag indicating if the variant is predicted to be a loss-of-function variant by the LOFTEE algorithm.

Optionally, the transcript consequences can be reduced to the universe of a gene index.

Parameters:

    filter_by (GeneIndex): A gene index to filter by. Required.

Returns:

    V2G: variant to gene assignments from the LOFTEE algorithm

Source code in src/otg/dataset/variant_annotation.py
def get_plof_v2g(self: VariantAnnotation, filter_by: GeneIndex) -> V2G:\n\"\"\"Creates a dataset with variant to gene assignments with a flag indicating if the variant is predicted to be a loss-of-function variant by the LOFTEE algorithm.\n\n    Optionally the trancript consequences can be reduced to the universe of a gene index.\n\n    Args:\n        filter_by (GeneIndex): A gene index to filter by.\n\n    Returns:\n        V2G: variant to gene assignments from the LOFTEE algorithm\n    \"\"\"\n    return V2G(\n        _df=self.get_transcript_consequence_df(filter_by)\n        .filter(f.col(\"transcriptConsequence.lof\").isNotNull())\n        .withColumn(\n            \"isHighQualityPlof\",\n            f.when(f.col(\"transcriptConsequence.lof\") == \"HC\", True).when(\n                f.col(\"transcriptConsequence.lof\") == \"LC\", False\n            ),\n        )\n        .withColumn(\n            \"score\",\n            f.when(f.col(\"isHighQualityPlof\"), 1.0).when(\n                ~f.col(\"isHighQualityPlof\"), 0\n            ),\n        )\n        .select(\n            \"variantId\",\n            \"chromosome\",\n            \"position\",\n            \"geneId\",\n            \"isHighQualityPlof\",\n            f.col(\"score\"),\n            f.lit(\"vep\").alias(\"datatypeId\"),\n            f.lit(\"loftee\").alias(\"datasourceId\"),\n        )\n    )\n
"},{"location":"components/dataset/variant_annotation/#otg.dataset.variant_annotation.VariantAnnotation.get_polyphen_v2g","title":"get_polyphen_v2g(filter_by=None)","text":"

Creates a dataset with variant to gene assignments with PolyPhen's predicted score on the transcript.

PolyPhen informs about the probability that a substitution is damaging. Optionally, the transcript consequences can be reduced to the universe of a gene index.

Parameters:

    filter_by (GeneIndex): A gene index to filter by. Defaults to None.

Returns:

    V2G: variant to gene assignments with their polyphen scores

Source code in src/otg/dataset/variant_annotation.py
def get_polyphen_v2g(\n    self: VariantAnnotation, filter_by: Optional[GeneIndex] = None\n) -> V2G:\n\"\"\"Creates a dataset with variant to gene assignments with a PolyPhen's predicted score on the transcript.\n\n    Polyphen informs about the probability that a substitution is damaging. Optionally the trancript consequences can be reduced to the universe of a gene index.\n\n    Args:\n        filter_by (GeneIndex): A gene index to filter by. Defaults to None.\n\n    Returns:\n        V2G: variant to gene assignments with their polyphen scores\n    \"\"\"\n    return V2G(\n        _df=self.get_transcript_consequence_df(filter_by)\n        .filter(f.col(\"transcriptConsequence.polyphenScore\").isNotNull())\n        .select(\n            \"variantId\",\n            \"chromosome\",\n            \"position\",\n            \"geneId\",\n            f.col(\"transcriptConsequence.polyphenScore\").alias(\"score\"),\n            f.col(\"transcriptConsequence.polyphenPrediction\").alias(\"label\"),\n            f.lit(\"vep\").alias(\"datatypeId\"),\n            f.lit(\"polyphen\").alias(\"datasourceId\"),\n        )\n    )\n
"},{"location":"components/dataset/variant_annotation/#otg.dataset.variant_annotation.VariantAnnotation.get_sift_v2g","title":"get_sift_v2g(filter_by)","text":"

Creates a dataset with variant to gene assignments with SIFT's predicted score on the transcript.

SIFT informs about the probability that a substitution is tolerated, so scores nearer zero are more likely to be deleterious. Optionally, the transcript consequences can be reduced to the universe of a gene index.

Parameters:

    filter_by (GeneIndex): A gene index to filter by. Required.

Returns:

    V2G: variant to gene assignments with their SIFT scores

Source code in src/otg/dataset/variant_annotation.py
def get_sift_v2g(self: VariantAnnotation, filter_by: GeneIndex) -> V2G:\n\"\"\"Creates a dataset with variant to gene assignments with a SIFT's predicted score on the transcript.\n\n    SIFT informs about the probability that a substitution is tolerated so scores nearer zero are more likely to be deleterious.\n    Optionally the trancript consequences can be reduced to the universe of a gene index.\n\n    Args:\n        filter_by (GeneIndex): A gene index to filter by.\n\n    Returns:\n        V2G: variant to gene assignments with their SIFT scores\n    \"\"\"\n    return V2G(\n        _df=self.get_transcript_consequence_df(filter_by)\n        .filter(f.col(\"transcriptConsequence.siftScore\").isNotNull())\n        .select(\n            \"variantId\",\n            \"chromosome\",\n            \"position\",\n            \"geneId\",\n            f.expr(\"1 - transcriptConsequence.siftScore\").alias(\"score\"),\n            f.col(\"transcriptConsequence.siftPrediction\").alias(\"label\"),\n            f.lit(\"vep\").alias(\"datatypeId\"),\n            f.lit(\"sift\").alias(\"datasourceId\"),\n        )\n    )\n
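For completeness, a combined sketch of the three transcript-level predictors documented above: scores come straight from PolyPhen, from `1 - siftScore` for SIFT, and from the LOFTEE HC/LC flag (1.0 / 0) for pLOF. Assumes the `variant_annotation` and `gene_index` objects introduced earlier:

```python
polyphen_v2g = variant_annotation.get_polyphen_v2g(filter_by=gene_index)
sift_v2g = variant_annotation.get_sift_v2g(filter_by=gene_index)
plof_v2g = variant_annotation.get_plof_v2g(filter_by=gene_index)
```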
"},{"location":"components/dataset/variant_annotation/#otg.dataset.variant_annotation.VariantAnnotation.get_transcript_consequence_df","title":"get_transcript_consequence_df(filter_by=None)","text":"

Dataframe of exploded transcript consequences.

Optionally, the transcript consequences can be reduced to the universe of a gene index.

Parameters:

    filter_by (GeneIndex): A gene index. Defaults to None.

Returns:

    DataFrame: A dataframe exploded by transcript consequences with the columns variantId, chromosome, transcriptConsequence

Source code in src/otg/dataset/variant_annotation.py
def get_transcript_consequence_df(\n    self: VariantAnnotation, filter_by: Optional[GeneIndex] = None\n) -> DataFrame:\n\"\"\"Dataframe of exploded transcript consequences.\n\n    Optionally the trancript consequences can be reduced to the universe of a gene index.\n\n    Args:\n        filter_by (GeneIndex): A gene index. Defaults to None.\n\n    Returns:\n        DataFrame: A dataframe exploded by transcript consequences with the columns variantId, chromosome, transcriptConsequence\n    \"\"\"\n    # exploding the array removes records without VEP annotation\n    transript_consequences = self.df.withColumn(\n        \"transcriptConsequence\", f.explode(\"vep.transcriptConsequences\")\n    ).select(\n        \"variantId\",\n        \"chromosome\",\n        \"position\",\n        \"transcriptConsequence\",\n        f.col(\"transcriptConsequence.geneId\").alias(\"geneId\"),\n    )\n    if filter_by:\n        transript_consequences = transript_consequences.join(\n            f.broadcast(filter_by.df),\n            on=[\"chromosome\", \"geneId\"],\n        )\n    return transript_consequences.persist()\n
"},{"location":"components/dataset/variant_annotation/#otg.dataset.variant_annotation.VariantAnnotation.max_maf","title":"max_maf()","text":"

Maximum minor allele frequency across all populations.

Returns:

    Column: Maximum minor allele frequency across all populations.

Source code in src/otg/dataset/variant_annotation.py
def max_maf(self: VariantAnnotation) -> Column:\n\"\"\"Maximum minor allele frequency accross all populations.\n\n    Returns:\n        Column: Maximum minor allele frequency accross all populations.\n    \"\"\"\n    return f.array_max(\n        f.transform(\n            self.df.alleleFrequencies,\n            lambda af: f.when(\n                af.alleleFrequency > 0.5, 1 - af.alleleFrequency\n            ).otherwise(af.alleleFrequency),\n        )\n    )\n
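Because `max_maf` returns a `Column` expression over `alleleFrequencies` (folding frequencies above 0.5 to `1 - AF`), it can be attached directly to the dataset's own DataFrame. A short sketch, assuming the `variant_annotation` instance from earlier:

```python
# Add the maximum minor allele frequency as a column:
with_maf = variant_annotation.df.withColumn("maxMaf", variant_annotation.max_maf())
```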
"},{"location":"components/dataset/variant_annotation/#otg.dataset.variant_annotation.VariantAnnotation.persist","title":"persist()","text":"

Persist DataFrame included in the Dataset.

Source code in src/otg/dataset/variant_annotation.py
def persist(self: VariantAnnotation) -> VariantAnnotation:\n\"\"\"Persist DataFrame included in the Dataset.\"\"\"\n    self.df = self._df.persist()\n    return self\n
"},{"location":"components/dataset/variant_annotation/#schema","title":"Schema","text":"
root\n |-- variantId: string (nullable = false)\n |-- chromosome: string (nullable = false)\n |-- position: integer (nullable = false)\n |-- gnomad3VariantId: string (nullable = false)\n |-- referenceAllele: string (nullable = false)\n |-- alternateAllele: string (nullable = false)\n |-- chromosomeB37: string (nullable = true)\n |-- positionB37: integer (nullable = true)\n |-- alleleType: string (nullable = true)\n |-- rsIds: array (nullable = true)\n |    |-- element: string (containsNull = true)\n |-- alleleFrequencies: array (nullable = false)\n |    |-- element: struct (containsNull = true)\n |    |    |-- populationName: string (nullable = true)\n |    |    |-- alleleFrequency: double (nullable = true)\n |-- cadd: struct (nullable = true)\n |    |-- phred: float (nullable = true)\n |    |-- raw: float (nullable = true)\n |-- vep: struct (nullable = false)\n |    |-- mostSevereConsequence: string (nullable = true)\n |    |-- transcriptConsequences: array (nullable = true)\n |    |    |-- element: struct (containsNull = true)\n |    |    |    |-- aminoAcids: string (nullable = true)\n |    |    |    |-- consequenceTerms: array (nullable = true)\n |    |    |    |    |-- element: string (containsNull = true)\n |    |    |    |-- geneId: string (nullable = true)\n |    |    |    |-- lof: string (nullable = true)\n |    |    |    |-- polyphenScore: double (nullable = true)\n |    |    |    |-- polyphenPrediction: string (nullable = true)\n |    |    |    |-- siftScore: double (nullable = true)\n |    |    |    |-- siftPrediction: string (nullable = true)\n
"},{"location":"components/dataset/variant_index/","title":"Variant index","text":"

Bases: Dataset

Variant index dataset.

The variant index dataset is the result of intersecting the variant annotation (gnomAD) dataset with the variants that have V2D information available.

Source code in src/otg/dataset/variant_index.py
@dataclass\nclass VariantIndex(Dataset):\n\"\"\"Variant index dataset.\n\n    Variant index dataset is the result of intersecting the variant annotation (gnomad) dataset with the variants with V2D available information.\n    \"\"\"\n\n    _schema: StructType = parse_spark_schema(\"variant_index.json\")\n\n    @classmethod\n    def from_parquet(\n        cls: type[VariantIndex], session: Session, path: str\n    ) -> VariantIndex:\n\"\"\"Initialise VariantIndex from parquet file.\n\n        Args:\n            session (Session): ETL session\n            path (str): Path to parquet file\n\n        Returns:\n            VariantIndex: VariantIndex dataset\n        \"\"\"\n        df = session.read_parquet(path=path, schema=cls._schema)\n        return cls(_df=df, _schema=cls._schema)\n\n    @classmethod\n    def from_variant_annotation(\n        cls: type[VariantIndex],\n        variant_annotation: VariantAnnotation,\n    ) -> VariantIndex:\n\"\"\"Initialise VariantIndex from pre-existing variant annotation dataset.\"\"\"\n        unchanged_cols = [\n            \"variantId\",\n            \"chromosome\",\n            \"position\",\n            \"referenceAllele\",\n            \"alternateAllele\",\n            \"chromosomeB37\",\n            \"positionB37\",\n            \"alleleType\",\n            \"alleleFrequencies\",\n            \"cadd\",\n        ]\n        vi = cls(\n            _df=variant_annotation.df.select(\n                *unchanged_cols,\n                f.col(\"vep.mostSevereConsequence\").alias(\"mostSevereConsequence\"),\n                # filters/rsid are arrays that can be empty, in this case we convert them to null\n                nullify_empty_array(f.col(\"rsIds\")).alias(\"rsIds\"),\n            ),\n        )\n        return VariantIndex(\n            _df=vi.df.repartition(\n                400,\n                \"chromosome\",\n            ).sortWithinPartitions(\"chromosome\", \"position\")\n        )\n\n    def persist(self: VariantIndex) -> VariantIndex:\n\"\"\"Persist DataFrame included in the Dataset.\"\"\"\n        self.df = self._df.persist()\n        return self\n
"},{"location":"components/dataset/variant_index/#otg.dataset.variant_index.VariantIndex.from_parquet","title":"from_parquet(session, path) classmethod","text":"

Initialise VariantIndex from parquet file.

Parameters:

    session (Session): ETL session. Required.
    path (str): Path to parquet file. Required.

Returns:

    VariantIndex: VariantIndex dataset

Source code in src/otg/dataset/variant_index.py
@classmethod\ndef from_parquet(\n    cls: type[VariantIndex], session: Session, path: str\n) -> VariantIndex:\n\"\"\"Initialise VariantIndex from parquet file.\n\n    Args:\n        session (Session): ETL session\n        path (str): Path to parquet file\n\n    Returns:\n        VariantIndex: VariantIndex dataset\n    \"\"\"\n    df = session.read_parquet(path=path, schema=cls._schema)\n    return cls(_df=df, _schema=cls._schema)\n
"},{"location":"components/dataset/variant_index/#otg.dataset.variant_index.VariantIndex.from_variant_annotation","title":"from_variant_annotation(variant_annotation) classmethod","text":"

Initialise VariantIndex from pre-existing variant annotation dataset.

Source code in src/otg/dataset/variant_index.py
@classmethod\ndef from_variant_annotation(\n    cls: type[VariantIndex],\n    variant_annotation: VariantAnnotation,\n) -> VariantIndex:\n\"\"\"Initialise VariantIndex from pre-existing variant annotation dataset.\"\"\"\n    unchanged_cols = [\n        \"variantId\",\n        \"chromosome\",\n        \"position\",\n        \"referenceAllele\",\n        \"alternateAllele\",\n        \"chromosomeB37\",\n        \"positionB37\",\n        \"alleleType\",\n        \"alleleFrequencies\",\n        \"cadd\",\n    ]\n    vi = cls(\n        _df=variant_annotation.df.select(\n            *unchanged_cols,\n            f.col(\"vep.mostSevereConsequence\").alias(\"mostSevereConsequence\"),\n            # filters/rsid are arrays that can be empty, in this case we convert them to null\n            nullify_empty_array(f.col(\"rsIds\")).alias(\"rsIds\"),\n        ),\n    )\n    return VariantIndex(\n        _df=vi.df.repartition(\n            400,\n            \"chromosome\",\n        ).sortWithinPartitions(\"chromosome\", \"position\")\n    )\n
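A one-line usage sketch, assuming `variant_annotation` is a `VariantAnnotation` instance as in the earlier examples:

```python
from otg.dataset.variant_index import VariantIndex

variant_index = VariantIndex.from_variant_annotation(variant_annotation)
```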
"},{"location":"components/dataset/variant_index/#otg.dataset.variant_index.VariantIndex.persist","title":"persist()","text":"

Persist DataFrame included in the Dataset.

Source code in src/otg/dataset/variant_index.py
def persist(self: VariantIndex) -> VariantIndex:\n\"\"\"Persist DataFrame included in the Dataset.\"\"\"\n    self.df = self._df.persist()\n    return self\n
"},{"location":"components/dataset/variant_index/#schema","title":"Schema","text":"
root\n |-- variantId: string (nullable = false)\n |-- chromosome: string (nullable = false)\n |-- position: integer (nullable = false)\n |-- referenceAllele: string (nullable = false)\n |-- alternateAllele: string (nullable = false)\n |-- chromosomeB37: string (nullable = true)\n |-- positionB37: integer (nullable = true)\n |-- alleleType: string (nullable = false)\n |-- alleleFrequencies: array (nullable = false)\n |    |-- element: struct (containsNull = true)\n |    |    |-- populationName: string (nullable = true)\n |    |    |-- alleleFrequency: double (nullable = true)\n |-- cadd: struct (nullable = true)\n |    |-- phred: float (nullable = true)\n |    |-- raw: float (nullable = true)\n |-- mostSevereConsequence: string (nullable = true)\n |-- rsIds: array (nullable = true)\n |    |-- element: string (containsNull = true)\n
"},{"location":"components/dataset/variant_to_gene/","title":"Variant to gene","text":"

Bases: Dataset

Variant-to-gene (V2G) evidence dataset.

Variant-to-gene (V2G) evidence is understood as any piece of evidence that supports the association of a variant with a likely causal gene. The evidence can sometimes be context-specific and refer to specific biofeatures (e.g. cell types).

Source code in src/otg/dataset/v2g.py
@dataclass\nclass V2G(Dataset):\n\"\"\"Variant-to-gene (V2G) evidence dataset.\n\n    A variant-to-gene (V2G) evidence is understood as any piece of evidence that supports the association of a variant with a likely causal gene. The evidence can sometimes be context-specific and refer to specific `biofeatures` (e.g. cell types)\n    \"\"\"\n\n    _schema: StructType = parse_spark_schema(\"v2g.json\")\n\n    @classmethod\n    def from_parquet(cls: type[V2G], session: Session, path: str) -> V2G:\n\"\"\"Initialise V2G from parquet file.\n\n        Args:\n            session (Session): ETL session\n            path (str): Path to parquet file\n\n        Returns:\n            V2G: V2G dataset\n        \"\"\"\n        df = session.read_parquet(path=path, schema=cls._schema)\n        return cls(_df=df, _schema=cls._schema)\n\n    def filter_by_genes(self: V2G, genes: GeneIndex) -> V2G:\n\"\"\"Filter by V2G dataset by genes.\n\n        Args:\n            genes (GeneIndex): Gene index dataset to filter by\n\n        Returns:\n            V2G: V2G dataset filtered by genes\n        \"\"\"\n        self.df = self._df.join(genes.df.select(\"geneId\"), on=\"geneId\", how=\"inner\")\n        return self\n
"},{"location":"components/dataset/variant_to_gene/#otg.dataset.v2g.V2G.filter_by_genes","title":"filter_by_genes(genes)","text":"

Filter V2G dataset by genes.

Parameters:

    genes (GeneIndex): Gene index dataset to filter by. Required.

Returns:

    V2G: V2G dataset filtered by genes

Source code in src/otg/dataset/v2g.py
def filter_by_genes(self: V2G, genes: GeneIndex) -> V2G:\n\"\"\"Filter by V2G dataset by genes.\n\n    Args:\n        genes (GeneIndex): Gene index dataset to filter by\n\n    Returns:\n        V2G: V2G dataset filtered by genes\n    \"\"\"\n    self.df = self._df.join(genes.df.select(\"geneId\"), on=\"geneId\", how=\"inner\")\n    return self\n
"},{"location":"components/dataset/variant_to_gene/#otg.dataset.v2g.V2G.from_parquet","title":"from_parquet(session, path) classmethod","text":"

Initialise V2G from parquet file.

Parameters:

    session (Session): ETL session. Required.
    path (str): Path to parquet file. Required.

Returns:

    V2G: V2G dataset

Source code in src/otg/dataset/v2g.py
@classmethod\ndef from_parquet(cls: type[V2G], session: Session, path: str) -> V2G:\n\"\"\"Initialise V2G from parquet file.\n\n    Args:\n        session (Session): ETL session\n        path (str): Path to parquet file\n\n    Returns:\n        V2G: V2G dataset\n    \"\"\"\n    df = session.read_parquet(path=path, schema=cls._schema)\n    return cls(_df=df, _schema=cls._schema)\n
"},{"location":"components/dataset/variant_to_gene/#schema","title":"Schema","text":"
root\n |-- geneId: string (nullable = false)\n |-- variantId: string (nullable = false)\n |-- distance: long (nullable = true)\n |-- chromosome: string (nullable = false)\n |-- datatypeId: string (nullable = false)\n |-- datasourceId: string (nullable = false)\n |-- score: double (nullable = true)\n |-- resourceScore: double (nullable = true)\n |-- pmid: string (nullable = true)\n |-- biofeature: string (nullable = true)\n |-- position: integer (nullable = false)\n |-- label: string (nullable = true)\n |-- variantFunctionalConsequenceId: string (nullable = true)\n |-- isHighQualityPlof: boolean (nullable = true)\n
"},{"location":"components/dataset/study_index/_study_index/","title":"Study index","text":"

Bases: Dataset

Study index dataset.

A study index dataset captures all the metadata for all studies including GWAS and Molecular QTL.

Source code in src/otg/dataset/study_index.py
@dataclass\nclass StudyIndex(Dataset):\n\"\"\"Study index dataset.\n\n    A study index dataset captures all the metadata for all studies including GWAS and Molecular QTL.\n    \"\"\"\n\n    _schema: StructType = parse_spark_schema(\"studies.json\")\n\n    @classmethod\n    def from_parquet(cls: type[StudyIndex], session: Session, path: str) -> StudyIndex:\n\"\"\"Initialise StudyIndex from parquet file.\n\n        Args:\n            session (Session): ETL session\n            path (str): Path to parquet file\n\n        Returns:\n            StudyIndex: Study index dataset\n        \"\"\"\n        df = session.read_parquet(path=path, schema=cls._schema)\n        return cls(_df=df, _schema=cls._schema)\n\n    def study_type_lut(self: StudyIndex) -> DataFrame:\n\"\"\"Return a lookup table of study type.\n\n        Returns:\n            DataFrame: A dataframe containing `studyId` and `studyType` columns.\n        \"\"\"\n        return self.df.select(\"studyId\", \"studyType\")\n
"},{"location":"components/dataset/study_index/_study_index/#otg.dataset.study_index.StudyIndex.from_parquet","title":"from_parquet(session, path) classmethod","text":"

Initialise StudyIndex from parquet file.

Parameters:

    session (Session): ETL session. Required.
    path (str): Path to parquet file. Required.

Returns:

    StudyIndex: Study index dataset

Source code in src/otg/dataset/study_index.py
@classmethod\ndef from_parquet(cls: type[StudyIndex], session: Session, path: str) -> StudyIndex:\n\"\"\"Initialise StudyIndex from parquet file.\n\n    Args:\n        session (Session): ETL session\n        path (str): Path to parquet file\n\n    Returns:\n        StudyIndex: Study index dataset\n    \"\"\"\n    df = session.read_parquet(path=path, schema=cls._schema)\n    return cls(_df=df, _schema=cls._schema)\n
"},{"location":"components/dataset/study_index/_study_index/#otg.dataset.study_index.StudyIndex.study_type_lut","title":"study_type_lut()","text":"

Return a lookup table of study type.

Returns:

    DataFrame: A dataframe containing studyId and studyType columns.

Source code in src/otg/dataset/study_index.py
def study_type_lut(self: StudyIndex) -> DataFrame:\n\"\"\"Return a lookup table of study type.\n\n    Returns:\n        DataFrame: A dataframe containing `studyId` and `studyType` columns.\n    \"\"\"\n    return self.df.select(\"studyId\", \"studyType\")\n
"},{"location":"components/dataset/study_index/_study_index/#schema","title":"Schema","text":"
root\n |-- studyId: string (nullable = false)\n |-- projectId: string (nullable = false)\n |-- studyType: string (nullable = false)\n |-- traitFromSource: string (nullable = false)\n |-- traitFromSourceMappedIds: array (nullable = true)\n |    |-- element: string (containsNull = true)\n |-- pubmedId: string (nullable = true)\n |-- publicationTitle: string (nullable = true)\n |-- publicationFirstAuthor: string (nullable = true)\n |-- publicationDate: string (nullable = true)\n |-- publicationJournal: string (nullable = true)\n |-- backgroundTraitFromSourceMappedIds: array (nullable = true)\n |    |-- element: string (containsNull = true)\n |-- initialSampleSize: string (nullable = true)\n |-- nCases: long (nullable = true)\n |-- nControls: long (nullable = true)\n |-- nSamples: long (nullable = true)\n |-- discoverySamples: array (nullable = true)\n |    |-- element: struct (containsNull = false)\n |    |    |-- sampleSize: string (nullable = true)\n |    |    |-- ancestry: string (nullable = true)\n |-- replicationSamples: array (nullable = true)\n |    |-- element: struct (containsNull = false)\n |    |    |-- sampleSize: string (nullable = true)\n |    |    |-- ancestry: string (nullable = true)\n |-- summarystatsLocation: string (nullable = true)\n |-- hasSumstats: boolean (nullable = true)\n
"},{"location":"components/dataset/study_index/study_index_finngen/","title":"Study index finngen","text":"

Bases: StudyIndex

Study index dataset from FinnGen.

The following information is aggregated/extracted:

  • Study ID in the special format (FINNGEN_R9_*)
  • Trait name (for example, Amoebiasis)
  • Number of cases and controls
  • Link to the summary statistics location

Some fields are also populated as constants, such as study type and the initial sample size.

Source code in src/otg/dataset/study_index.py
@dataclass\nclass StudyIndexFinnGen(StudyIndex):\n\"\"\"Study index dataset from FinnGen.\n\n    The following information is aggregated/extracted:\n\n    - Study ID in the special format (FINNGEN_R9_*)\n    - Trait name (for example, Amoebiasis)\n    - Number of cases and controls\n    - Link to the summary statistics location\n\n    Some fields are also populated as constants, such as study type and the initial sample size.\n    \"\"\"\n\n    @classmethod\n    def from_source(\n        cls: type[StudyIndexFinnGen],\n        finngen_studies: DataFrame,\n        finngen_release_prefix: str,\n        finngen_sumstat_url_prefix: str,\n        finngen_sumstat_url_suffix: str,\n    ) -> StudyIndexFinnGen:\n\"\"\"This function ingests study level metadata from FinnGen.\n\n        Args:\n            finngen_studies (DataFrame): FinnGen raw study table\n            finngen_release_prefix (str): Release prefix pattern.\n            finngen_sumstat_url_prefix (str): URL prefix for summary statistics location.\n            finngen_sumstat_url_suffix (str): URL prefix suffix for summary statistics location.\n\n        Returns:\n            StudyIndexFinnGen: Parsed and annotated FinnGen study table.\n        \"\"\"\n        return cls(\n            _df=(\n                # Read FinnGen raw data.\n                finngen_studies.select(\n                    # Select the desired columns.\n                    f.concat(\n                        f.lit(finngen_release_prefix + \"_\"), f.col(\"phenocode\")\n                    ).alias(\"studyId\"),\n                    f.col(\"phenostring\").alias(\"traitFromSource\"),\n                    f.col(\"num_cases\").alias(\"nCases\"),\n                    f.col(\"num_controls\").alias(\"nControls\"),\n                    # Set constant value columns.\n                    f.lit(finngen_release_prefix).alias(\"projectId\"),\n                    f.lit(\"gwas\").alias(\"studyType\"),\n                    f.lit(True).alias(\"hasSumstats\"),\n                    f.lit(\"377,277 (210,870 females and 166,407 males)\").alias(\n                        \"initialSampleSize\"\n                    ),\n                )\n                .withColumn(\"nSamples\", f.col(\"nCases\") + f.col(\"nControls\"))\n                .withColumn(\n                    \"summarystatsLocation\",\n                    f.concat(\n                        f.lit(finngen_sumstat_url_prefix),\n                        f.col(\"studyId\"),\n                        f.lit(finngen_sumstat_url_suffix),\n                    ),\n                )\n            )\n        )\n
"},{"location":"components/dataset/study_index/study_index_finngen/#otg.dataset.study_index.StudyIndexFinnGen.from_source","title":"from_source(finngen_studies, finngen_release_prefix, finngen_sumstat_url_prefix, finngen_sumstat_url_suffix) classmethod","text":"

This function ingests study-level metadata from FinnGen.

Parameters:

    finngen_studies (DataFrame): FinnGen raw study table. Required.
    finngen_release_prefix (str): Release prefix pattern. Required.
    finngen_sumstat_url_prefix (str): URL prefix for summary statistics location. Required.
    finngen_sumstat_url_suffix (str): URL suffix for summary statistics location. Required.

Returns:

    StudyIndexFinnGen: Parsed and annotated FinnGen study table.

Source code in src/otg/dataset/study_index.py
@classmethod\ndef from_source(\n    cls: type[StudyIndexFinnGen],\n    finngen_studies: DataFrame,\n    finngen_release_prefix: str,\n    finngen_sumstat_url_prefix: str,\n    finngen_sumstat_url_suffix: str,\n) -> StudyIndexFinnGen:\n\"\"\"This function ingests study level metadata from FinnGen.\n\n    Args:\n        finngen_studies (DataFrame): FinnGen raw study table\n        finngen_release_prefix (str): Release prefix pattern.\n        finngen_sumstat_url_prefix (str): URL prefix for summary statistics location.\n        finngen_sumstat_url_suffix (str): URL prefix suffix for summary statistics location.\n\n    Returns:\n        StudyIndexFinnGen: Parsed and annotated FinnGen study table.\n    \"\"\"\n    return cls(\n        _df=(\n            # Read FinnGen raw data.\n            finngen_studies.select(\n                # Select the desired columns.\n                f.concat(\n                    f.lit(finngen_release_prefix + \"_\"), f.col(\"phenocode\")\n                ).alias(\"studyId\"),\n                f.col(\"phenostring\").alias(\"traitFromSource\"),\n                f.col(\"num_cases\").alias(\"nCases\"),\n                f.col(\"num_controls\").alias(\"nControls\"),\n                # Set constant value columns.\n                f.lit(finngen_release_prefix).alias(\"projectId\"),\n                f.lit(\"gwas\").alias(\"studyType\"),\n                f.lit(True).alias(\"hasSumstats\"),\n                f.lit(\"377,277 (210,870 females and 166,407 males)\").alias(\n                    \"initialSampleSize\"\n                ),\n            )\n            .withColumn(\"nSamples\", f.col(\"nCases\") + f.col(\"nControls\"))\n            .withColumn(\n                \"summarystatsLocation\",\n                f.concat(\n                    f.lit(finngen_sumstat_url_prefix),\n                    f.col(\"studyId\"),\n                    f.lit(finngen_sumstat_url_suffix),\n                ),\n            )\n        )\n    )\n
"},{"location":"components/dataset/study_index/study_index_finngen/#schema","title":"Schema","text":"
root\n |-- studyId: string (nullable = false)\n |-- projectId: string (nullable = false)\n |-- studyType: string (nullable = false)\n |-- traitFromSource: string (nullable = false)\n |-- traitFromSourceMappedIds: array (nullable = true)\n |    |-- element: string (containsNull = true)\n |-- pubmedId: string (nullable = true)\n |-- publicationTitle: string (nullable = true)\n |-- publicationFirstAuthor: string (nullable = true)\n |-- publicationDate: string (nullable = true)\n |-- publicationJournal: string (nullable = true)\n |-- backgroundTraitFromSourceMappedIds: array (nullable = true)\n |    |-- element: string (containsNull = true)\n |-- initialSampleSize: string (nullable = true)\n |-- nCases: long (nullable = true)\n |-- nControls: long (nullable = true)\n |-- nSamples: long (nullable = true)\n |-- discoverySamples: array (nullable = true)\n |    |-- element: struct (containsNull = false)\n |    |    |-- sampleSize: string (nullable = true)\n |    |    |-- ancestry: string (nullable = true)\n |-- replicationSamples: array (nullable = true)\n |    |-- element: struct (containsNull = false)\n |    |    |-- sampleSize: string (nullable = true)\n |    |    |-- ancestry: string (nullable = true)\n |-- summarystatsLocation: string (nullable = true)\n |-- hasSumstats: boolean (nullable = true)\n
"},{"location":"components/dataset/study_index/study_index_gwas_catalog/","title":"Study index gwas catalog","text":"

Bases: StudyIndex

Study index dataset from GWAS Catalog.

The following information is harmonised from the GWAS Catalog:

  • All publication-related information is retained.
  • Mapped measured and background traits are parsed.
  • Studies are flagged if harmonised summary statistics datasets are available.
  • If available, the FTP path to these files is provided.
  • Ancestries from the discovery and replication stages are structured with sample counts.
  • Case/control counts are extracted.
  • The number of samples with European ancestry is extracted.
Source code in src/otg/dataset/study_index.py
@dataclass\nclass StudyIndexGWASCatalog(StudyIndex):\n\"\"\"Study index dataset from GWAS Catalog.\n\n    The following information is harmonised from the GWAS Catalog:\n\n    - All publication related information retained.\n    - Mapped measured and background traits parsed.\n    - Flagged if harmonized summary statistics datasets available.\n    - If available, the ftp path to these files presented.\n    - Ancestries from the discovery and replication stages are structured with sample counts.\n    - Case/control counts extracted.\n    - The number of samples with European ancestry extracted.\n\n    \"\"\"\n\n    @staticmethod\n    def _gwas_ancestry_to_gnomad(gwas_catalog_ancestry: Column) -> Column:\n\"\"\"Normalised ancestry column from GWAS Catalog into Gnomad ancestry.\n\n        Args:\n            gwas_catalog_ancestry (Column): GWAS Catalog ancestry\n\n        Returns:\n            Column: mapped Gnomad ancestry using LUT\n        \"\"\"\n        # GWAS Catalog to p-value mapping\n        json_dict = json.loads(\n            pkg_resources.read_text(\n                data, \"gwascat_2_gnomad_superpopulation_map.json\", encoding=\"utf-8\"\n            )\n        )\n        map_expr = f.create_map(*[f.lit(x) for x in chain(*json_dict.items())])\n\n        return f.transform(gwas_catalog_ancestry, lambda x: map_expr[x])\n\n    @classmethod\n    def _parse_study_table(\n        cls: type[StudyIndexGWASCatalog], catalog_studies: DataFrame\n    ) -> StudyIndexGWASCatalog:\n\"\"\"Harmonise GWASCatalog study table with `StudyIndex` schema.\n\n        Args:\n            catalog_studies (DataFrame): GWAS Catalog study table\n\n        Returns:\n            StudyIndexGWASCatalog:\n        \"\"\"\n        return cls(\n            _df=catalog_studies.select(\n                f.coalesce(\n                    f.col(\"STUDY ACCESSION\"), f.monotonically_increasing_id()\n                ).alias(\"studyId\"),\n                f.lit(\"GCST\").alias(\"projectId\"),\n                f.lit(\"gwas\").alias(\"studyType\"),\n                f.col(\"PUBMED ID\").alias(\"pubmedId\"),\n                f.col(\"FIRST AUTHOR\").alias(\"publicationFirstAuthor\"),\n                f.col(\"DATE\").alias(\"publicationDate\"),\n                f.col(\"JOURNAL\").alias(\"publicationJournal\"),\n                f.col(\"STUDY\").alias(\"publicationTitle\"),\n                f.coalesce(f.col(\"DISEASE/TRAIT\"), f.lit(\"Unreported\")).alias(\n                    \"traitFromSource\"\n                ),\n                f.col(\"INITIAL SAMPLE SIZE\").alias(\"initialSampleSize\"),\n                parse_efos(f.col(\"MAPPED_TRAIT_URI\")).alias(\"traitFromSourceMappedIds\"),\n                parse_efos(f.col(\"MAPPED BACKGROUND TRAIT URI\")).alias(\n                    \"backgroundTraitFromSourceMappedIds\"\n                ),\n            )\n        )\n\n    @classmethod\n    def from_source(\n        cls: type[StudyIndexGWASCatalog],\n        catalog_studies: DataFrame,\n        ancestry_file: DataFrame,\n        sumstats_lut: DataFrame,\n    ) -> StudyIndexGWASCatalog:\n\"\"\"This function ingests study level metadata from the GWAS Catalog.\n\n        Args:\n            catalog_studies (DataFrame): GWAS Catalog raw study table\n            ancestry_file (DataFrame): GWAS Catalog ancestry table.\n            sumstats_lut (DataFrame): GWAS Catalog summary statistics list.\n\n        Returns:\n            StudyIndexGWASCatalog: Parsed and annotated GWAS Catalog study table.\n        \"\"\"\n        # Read GWAS Catalogue raw 
data\n        return (\n            cls._parse_study_table(catalog_studies)\n            ._annotate_ancestries(ancestry_file)\n            ._annotate_sumstats_info(sumstats_lut)\n            ._annotate_discovery_sample_sizes()\n        )\n\n    def get_gnomad_ancestry_sample_sizes(self: StudyIndexGWASCatalog) -> DataFrame:\n\"\"\"Get all studies and their ancestries.\n\n        Returns:\n            DataFrame: containing `studyId`, `gnomadPopulation` and `relativeSampleSize` columns\n        \"\"\"\n        # Study ancestries\n        w_study = Window.partitionBy(\"studyId\")\n        return (\n            self.df\n            # Excluding studies where no sample discription is provided:\n            .filter(f.col(\"discoverySamples\").isNotNull())\n            # Exploding sample description and study identifier:\n            .withColumn(\"discoverySample\", f.explode(f.col(\"discoverySamples\")))\n            # Splitting sample descriptions further:\n            .withColumn(\n                \"ancestries\",\n                f.split(f.col(\"discoverySample.ancestry\"), r\",\\s(?![^()]*\\))\"),\n            )\n            # Dividing sample sizes assuming even distribution\n            .withColumn(\n                \"adjustedSampleSize\",\n                f.col(\"discoverySample.sampleSize\") / f.size(f.col(\"ancestries\")),\n            )\n            # mapped to gnomAD superpopulation and exploded\n            .withColumn(\n                \"gnomadPopulation\",\n                f.explode(\n                    StudyIndexGWASCatalog._gwas_ancestry_to_gnomad(f.col(\"ancestries\"))\n                ),\n            )\n            # Group by studies and aggregate for major population:\n            .groupBy(\"studyId\", \"gnomadPopulation\")\n            .agg(f.sum(f.col(\"adjustedSampleSize\")).alias(\"sampleSize\"))\n            # Calculate proportions for each study\n            .withColumn(\n                \"relativeSampleSize\",\n                f.col(\"sampleSize\") / f.sum(\"sampleSize\").over(w_study),\n            )\n            .drop(\"sampleSize\")\n        )\n\n    def update_study_id(\n        self: StudyIndexGWASCatalog, study_annotation: DataFrame\n    ) -> StudyIndexGWASCatalog:\n\"\"\"Update studyId with a dataframe containing study.\n\n        Args:\n            study_annotation (DataFrame): Dataframe containing `updatedStudyId`, `traitFromSource`, `traitFromSourceMappedIds` and key column `studyId`.\n\n        Returns:\n            StudyIndexGWASCatalog: Updated study table.\n        \"\"\"\n        self.df = (\n            self._df.join(\n                study_annotation.select(\n                    *[\n                        f.col(c).alias(f\"updated{c}\")\n                        if c not in [\"studyId\", \"updatedStudyId\"]\n                        else f.col(c)\n                        for c in study_annotation.columns\n                    ]\n                ),\n                on=\"studyId\",\n                how=\"left\",\n            )\n            .withColumn(\n                \"studyId\",\n                f.coalesce(f.col(\"updatedStudyId\"), f.col(\"studyId\")),\n            )\n            .withColumn(\n                \"traitFromSource\",\n                f.coalesce(f.col(\"updatedtraitFromSource\"), f.col(\"traitFromSource\")),\n            )\n            .withColumn(\n                \"traitFromSourceMappedIds\",\n                f.coalesce(\n                    f.col(\"updatedtraitFromSourceMappedIds\"),\n                    
f.col(\"traitFromSourceMappedIds\"),\n                ),\n            )\n            .select(self._df.columns)\n        )\n\n        return self\n\n    def _annotate_ancestries(\n        self: StudyIndexGWASCatalog, ancestry_lut: DataFrame\n    ) -> StudyIndexGWASCatalog:\n\"\"\"Extracting sample sizes and ancestry information.\n\n        This function parses the ancestry data. Also get counts for the europeans in the same\n        discovery stage.\n\n        Args:\n            ancestry_lut (DataFrame): Ancestry table as downloaded from the GWAS Catalog\n\n        Returns:\n            StudyIndexGWASCatalog: Slimmed and cleaned version of the ancestry annotation.\n        \"\"\"\n        ancestry = (\n            ancestry_lut\n            # Convert column headers to camelcase:\n            .transform(\n                lambda df: df.select(\n                    *[f.expr(column2camel_case(x)) for x in df.columns]\n                )\n            ).withColumnRenamed(\n                \"studyAccession\", \"studyId\"\n            )  # studyId has not been split yet\n        )\n\n        # Get a high resolution dataset on experimental stage:\n        ancestry_stages = (\n            ancestry.groupBy(\"studyId\")\n            .pivot(\"stage\")\n            .agg(\n                f.collect_set(\n                    f.struct(\n                        f.col(\"numberOfIndividuals\").alias(\"sampleSize\"),\n                        f.col(\"broadAncestralCategory\").alias(\"ancestry\"),\n                    )\n                )\n            )\n            .withColumnRenamed(\"initial\", \"discoverySamples\")\n            .withColumnRenamed(\"replication\", \"replicationSamples\")\n            .persist()\n        )\n\n        # Generate information on the ancestry composition of the discovery stage, and calculate\n        # the proportion of the Europeans:\n        europeans_deconvoluted = (\n            ancestry\n            # Focus on discovery stage:\n            .filter(f.col(\"stage\") == \"initial\")\n            # Sorting ancestries if European:\n            .withColumn(\n                \"ancestryFlag\",\n                # Excluding finnish:\n                f.when(\n                    f.col(\"initialSampleDescription\").contains(\"Finnish\"),\n                    f.lit(\"other\"),\n                )\n                # Excluding Icelandic population:\n                .when(\n                    f.col(\"initialSampleDescription\").contains(\"Icelandic\"),\n                    f.lit(\"other\"),\n                )\n                # Including European ancestry:\n                .when(f.col(\"broadAncestralCategory\") == \"European\", f.lit(\"european\"))\n                # Exclude all other population:\n                .otherwise(\"other\"),\n            )\n            # Grouping by study accession and initial sample description:\n            .groupBy(\"studyId\")\n            .pivot(\"ancestryFlag\")\n            .agg(\n                # Summarizing sample sizes for all ancestries:\n                f.sum(f.col(\"numberOfIndividuals\"))\n            )\n            # Do arithmetics to make sure we have the right proportion of european in the set:\n            .withColumn(\n                \"initialSampleCountEuropean\",\n                f.when(f.col(\"european\").isNull(), f.lit(0)).otherwise(\n                    f.col(\"european\")\n                ),\n            )\n            .withColumn(\n                \"initialSampleCountOther\",\n                f.when(f.col(\"other\").isNull(), 
f.lit(0)).otherwise(f.col(\"other\")),\n            )\n            .withColumn(\n                \"initialSampleCount\",\n                f.col(\"initialSampleCountEuropean\") + f.col(\"other\"),\n            )\n            .drop(\n                \"european\",\n                \"other\",\n                \"initialSampleCount\",\n                \"initialSampleCountEuropean\",\n                \"initialSampleCountOther\",\n            )\n        )\n\n        parsed_ancestry_lut = ancestry_stages.join(\n            europeans_deconvoluted, on=\"studyId\", how=\"outer\"\n        )\n\n        self.df = self.df.join(parsed_ancestry_lut, on=\"studyId\", how=\"left\")\n        return self\n\n    def _annotate_sumstats_info(\n        self: StudyIndexGWASCatalog, sumstats_lut: DataFrame\n    ) -> StudyIndexGWASCatalog:\n\"\"\"Annotate summary stat locations.\n\n        Args:\n            sumstats_lut (DataFrame): listing GWAS Catalog summary stats paths\n\n        Returns:\n            StudyIndexGWASCatalog: including `summarystatsLocation` and `hasSumstats` columns\n        \"\"\"\n        gwas_sumstats_base_uri = (\n            \"ftp://ftp.ebi.ac.uk/pub/databases/gwas/summary_statistics/\"\n        )\n\n        parsed_sumstats_lut = sumstats_lut.withColumn(\n            \"summarystatsLocation\",\n            f.concat(\n                f.lit(gwas_sumstats_base_uri),\n                f.regexp_replace(f.col(\"_c0\"), r\"^\\.\\/\", \"\"),\n            ),\n        ).select(\n            f.regexp_extract(f.col(\"summarystatsLocation\"), r\"\\/(GCST\\d+)\\/\", 1).alias(\n                \"studyId\"\n            ),\n            \"summarystatsLocation\",\n            f.lit(True).alias(\"hasSumstats\"),\n        )\n\n        self.df = (\n            self.df.drop(\"hasSumstats\")\n            .join(parsed_sumstats_lut, on=\"studyId\", how=\"left\")\n            .withColumn(\"hasSumstats\", f.coalesce(f.col(\"hasSumstats\"), f.lit(False)))\n        )\n        return self\n\n    def _annotate_discovery_sample_sizes(\n        self: StudyIndexGWASCatalog,\n    ) -> StudyIndexGWASCatalog:\n\"\"\"Extract the sample size of the discovery stage of the study as annotated in the GWAS Catalog.\n\n        For some studies that measure quantitative traits, nCases and nControls can't be extracted. 
Therefore, we assume these are 0.\n\n        Returns:\n            StudyIndexGWASCatalog: object with columns `nCases`, `nControls`, and `nSamples` per `studyId` correctly extracted.\n        \"\"\"\n        sample_size_lut = (\n            self.df.select(\n                \"studyId\",\n                f.explode_outer(f.split(f.col(\"initialSampleSize\"), r\",\\s+\")).alias(\n                    \"samples\"\n                ),\n            )\n            # Extracting the sample size from the string:\n            .withColumn(\n                \"sampleSize\",\n                f.regexp_extract(\n                    f.regexp_replace(f.col(\"samples\"), \",\", \"\"), r\"[0-9,]+\", 0\n                ).cast(t.IntegerType()),\n            )\n            .select(\n                \"studyId\",\n                \"sampleSize\",\n                f.when(f.col(\"samples\").contains(\"cases\"), f.col(\"sampleSize\"))\n                .otherwise(f.lit(0))\n                .alias(\"nCases\"),\n                f.when(f.col(\"samples\").contains(\"controls\"), f.col(\"sampleSize\"))\n                .otherwise(f.lit(0))\n                .alias(\"nControls\"),\n            )\n            # Aggregating sample sizes for all ancestries:\n            .groupBy(\"studyId\")  # studyId has not been split yet\n            .agg(\n                f.sum(\"nCases\").alias(\"nCases\"),\n                f.sum(\"nControls\").alias(\"nControls\"),\n                f.sum(\"sampleSize\").alias(\"nSamples\"),\n            )\n        )\n        self.df = self.df.join(sample_size_lut, on=\"studyId\", how=\"left\")\n        return self\n
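The `_gwas_ancestry_to_gnomad` helper above relies on a common Spark pattern: a Python look-up dictionary is flattened into an `f.create_map` expression and applied to every element of the ancestry array. A minimal sketch of that pattern, assuming the Spark session from the earlier sketch and an illustrative mapping rather than the packaged `gwascat_2_gnomad_superpopulation_map.json`:

```python
from itertools import chain

from pyspark.sql import functions as f

# Illustrative GWAS Catalog ancestry -> gnomAD superpopulation mapping.
lut = {"European": "nfe", "East Asian": "eas"}
map_expr = f.create_map(*[f.lit(x) for x in chain(*lut.items())])

df = spark.createDataFrame([(["European", "East Asian"],)], ["ancestries"])
df.select(
    f.transform("ancestries", lambda x: map_expr[x]).alias("gnomadAncestries")  # expected: [nfe, eas]
).show()
```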
"},{"location":"components/dataset/study_index/study_index_gwas_catalog/#otg.dataset.study_index.StudyIndexGWASCatalog.from_source","title":"from_source(catalog_studies, ancestry_file, sumstats_lut) classmethod","text":"

This function ingests study-level metadata from the GWAS Catalog.

Parameters:

  • catalog_studies (DataFrame, required): GWAS Catalog raw study table
  • ancestry_file (DataFrame, required): GWAS Catalog ancestry table.
  • sumstats_lut (DataFrame, required): GWAS Catalog summary statistics list.

Returns:

  • StudyIndexGWASCatalog: Parsed and annotated GWAS Catalog study table.

Source code in src/otg/dataset/study_index.py
@classmethod\ndef from_source(\n    cls: type[StudyIndexGWASCatalog],\n    catalog_studies: DataFrame,\n    ancestry_file: DataFrame,\n    sumstats_lut: DataFrame,\n) -> StudyIndexGWASCatalog:\n\"\"\"This function ingests study level metadata from the GWAS Catalog.\n\n    Args:\n        catalog_studies (DataFrame): GWAS Catalog raw study table\n        ancestry_file (DataFrame): GWAS Catalog ancestry table.\n        sumstats_lut (DataFrame): GWAS Catalog summary statistics list.\n\n    Returns:\n        StudyIndexGWASCatalog: Parsed and annotated GWAS Catalog study table.\n    \"\"\"\n    # Read GWAS Catalogue raw data\n    return (\n        cls._parse_study_table(catalog_studies)\n        ._annotate_ancestries(ancestry_file)\n        ._annotate_sumstats_info(sumstats_lut)\n        ._annotate_discovery_sample_sizes()\n    )\n
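A usage sketch, assuming the three inputs are the standard tab-separated GWAS Catalog downloads; the file names and read options below are illustrative. The summary statistics list is read without a header, which is why the source above refers to its first column as `_c0`:

```python
catalog_studies = spark.read.csv("gwas_catalog_studies.tsv", sep="\t", header=True)
ancestry_file = spark.read.csv("gwas_catalog_ancestries.tsv", sep="\t", header=True)
sumstats_lut = spark.read.csv("harmonised_sumstats_list.txt", sep="\t", header=False)

gwas_catalog_studies = StudyIndexGWASCatalog.from_source(
    catalog_studies=catalog_studies,
    ancestry_file=ancestry_file,
    sumstats_lut=sumstats_lut,
)
```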
"},{"location":"components/dataset/study_index/study_index_gwas_catalog/#otg.dataset.study_index.StudyIndexGWASCatalog.get_gnomad_ancestry_sample_sizes","title":"get_gnomad_ancestry_sample_sizes()","text":"

Get all studies and their ancestries.

Returns:

  • DataFrame: containing studyId, gnomadPopulation and relativeSampleSize columns.

Source code in src/otg/dataset/study_index.py
def get_gnomad_ancestry_sample_sizes(self: StudyIndexGWASCatalog) -> DataFrame:\n\"\"\"Get all studies and their ancestries.\n\n    Returns:\n        DataFrame: containing `studyId`, `gnomadPopulation` and `relativeSampleSize` columns\n    \"\"\"\n    # Study ancestries\n    w_study = Window.partitionBy(\"studyId\")\n    return (\n        self.df\n        # Excluding studies where no sample discription is provided:\n        .filter(f.col(\"discoverySamples\").isNotNull())\n        # Exploding sample description and study identifier:\n        .withColumn(\"discoverySample\", f.explode(f.col(\"discoverySamples\")))\n        # Splitting sample descriptions further:\n        .withColumn(\n            \"ancestries\",\n            f.split(f.col(\"discoverySample.ancestry\"), r\",\\s(?![^()]*\\))\"),\n        )\n        # Dividing sample sizes assuming even distribution\n        .withColumn(\n            \"adjustedSampleSize\",\n            f.col(\"discoverySample.sampleSize\") / f.size(f.col(\"ancestries\")),\n        )\n        # mapped to gnomAD superpopulation and exploded\n        .withColumn(\n            \"gnomadPopulation\",\n            f.explode(\n                StudyIndexGWASCatalog._gwas_ancestry_to_gnomad(f.col(\"ancestries\"))\n            ),\n        )\n        # Group by studies and aggregate for major population:\n        .groupBy(\"studyId\", \"gnomadPopulation\")\n        .agg(f.sum(f.col(\"adjustedSampleSize\")).alias(\"sampleSize\"))\n        # Calculate proportions for each study\n        .withColumn(\n            \"relativeSampleSize\",\n            f.col(\"sampleSize\") / f.sum(\"sampleSize\").over(w_study),\n        )\n        .drop(\"sampleSize\")\n    )\n
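For intuition, a simplified sketch of the arithmetic used above (the gnomAD ancestry look-up and the parenthesis-aware split are omitted): a study whose 9,000-sample discovery stage is described as "European, East Asian" contributes 4,500 samples to each ancestry, and `relativeSampleSize` is each ancestry's share of the study total.

```python
from pyspark.sql import functions as f
from pyspark.sql.window import Window

demo = spark.createDataFrame(
    [("GCST000001", 9000, "European, East Asian"), ("GCST000001", 1000, "African")],
    ["studyId", "sampleSize", "ancestry"],
)
(
    demo.withColumn("ancestries", f.split("ancestry", r",\s"))
    # Divide the sample size evenly across the ancestries listed together.
    .withColumn("adjustedSampleSize", f.col("sampleSize") / f.size("ancestries"))
    .withColumn("ancestry", f.explode("ancestries"))
    .groupBy("studyId", "ancestry")
    .agg(f.sum("adjustedSampleSize").alias("sampleSize"))
    # Each ancestry's proportion of the study's total sample size.
    .withColumn(
        "relativeSampleSize",
        f.col("sampleSize") / f.sum("sampleSize").over(Window.partitionBy("studyId")),
    )
    .show()
)
```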
"},{"location":"components/dataset/study_index/study_index_gwas_catalog/#otg.dataset.study_index.StudyIndexGWASCatalog.update_study_id","title":"update_study_id(study_annotation)","text":"

Update studyId with a dataframe containing study annotations.

Parameters:

  • study_annotation (DataFrame, required): Dataframe containing updatedStudyId, traitFromSource, traitFromSourceMappedIds and key column studyId.

Returns:

  • StudyIndexGWASCatalog: Updated study table.

Source code in src/otg/dataset/study_index.py
def update_study_id(\n    self: StudyIndexGWASCatalog, study_annotation: DataFrame\n) -> StudyIndexGWASCatalog:\n\"\"\"Update studyId with a dataframe containing study.\n\n    Args:\n        study_annotation (DataFrame): Dataframe containing `updatedStudyId`, `traitFromSource`, `traitFromSourceMappedIds` and key column `studyId`.\n\n    Returns:\n        StudyIndexGWASCatalog: Updated study table.\n    \"\"\"\n    self.df = (\n        self._df.join(\n            study_annotation.select(\n                *[\n                    f.col(c).alias(f\"updated{c}\")\n                    if c not in [\"studyId\", \"updatedStudyId\"]\n                    else f.col(c)\n                    for c in study_annotation.columns\n                ]\n            ),\n            on=\"studyId\",\n            how=\"left\",\n        )\n        .withColumn(\n            \"studyId\",\n            f.coalesce(f.col(\"updatedStudyId\"), f.col(\"studyId\")),\n        )\n        .withColumn(\n            \"traitFromSource\",\n            f.coalesce(f.col(\"updatedtraitFromSource\"), f.col(\"traitFromSource\")),\n        )\n        .withColumn(\n            \"traitFromSourceMappedIds\",\n            f.coalesce(\n                f.col(\"updatedtraitFromSourceMappedIds\"),\n                f.col(\"traitFromSourceMappedIds\"),\n            ),\n        )\n        .select(self._df.columns)\n    )\n\n    return self\n
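A sketch of the expected annotation input, reusing `gwas_catalog_studies` from the earlier sketch; the study identifiers and trait values are hypothetical. Rows without a matching annotation keep their original values because of the `coalesce` calls above.

```python
study_annotation = spark.createDataFrame(
    [("GCST001234", "GCST001234_1", "Eczema", ["EFO_0000274"])],  # hypothetical study split
    ["studyId", "updatedStudyId", "traitFromSource", "traitFromSourceMappedIds"],
)
gwas_catalog_studies = gwas_catalog_studies.update_study_id(study_annotation)
```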
"},{"location":"components/dataset/study_index/study_index_gwas_catalog/#schema","title":"Schema","text":"
root\n |-- studyId: string (nullable = false)\n |-- projectId: string (nullable = false)\n |-- studyType: string (nullable = false)\n |-- traitFromSource: string (nullable = false)\n |-- traitFromSourceMappedIds: array (nullable = true)\n |    |-- element: string (containsNull = true)\n |-- pubmedId: string (nullable = true)\n |-- publicationTitle: string (nullable = true)\n |-- publicationFirstAuthor: string (nullable = true)\n |-- publicationDate: string (nullable = true)\n |-- publicationJournal: string (nullable = true)\n |-- backgroundTraitFromSourceMappedIds: array (nullable = true)\n |    |-- element: string (containsNull = true)\n |-- initialSampleSize: string (nullable = true)\n |-- nCases: long (nullable = true)\n |-- nControls: long (nullable = true)\n |-- nSamples: long (nullable = true)\n |-- discoverySamples: array (nullable = true)\n |    |-- element: struct (containsNull = false)\n |    |    |-- sampleSize: string (nullable = true)\n |    |    |-- ancestry: string (nullable = true)\n |-- replicationSamples: array (nullable = true)\n |    |-- element: struct (containsNull = false)\n |    |    |-- sampleSize: string (nullable = true)\n |    |    |-- ancestry: string (nullable = true)\n |-- summarystatsLocation: string (nullable = true)\n |-- hasSumstats: boolean (nullable = true)\n
"},{"location":"components/dataset/study_locus/_study_locus/","title":"Study-locus","text":"

Bases: Dataset

Study-Locus dataset.

This dataset captures associations between studies/traits and genetic loci, as provided by fine-mapping methods.

Source code in src/otg/dataset/study_locus.py
@dataclass\nclass StudyLocus(Dataset):\n\"\"\"Study-Locus dataset.\n\n    This dataset captures associations between study/traits and a genetic loci as provided by finemapping methods.\n    \"\"\"\n\n    _schema: StructType = parse_spark_schema(\"study_locus.json\")\n\n    @staticmethod\n    def _overlapping_peaks(credset_to_overlap: DataFrame) -> DataFrame:\n\"\"\"Calculate overlapping signals (study-locus) between GWAS-GWAS and GWAS-Molecular trait.\n\n        Args:\n            credset_to_overlap (DataFrame): DataFrame containing at least `studyLocusId`, `studyType`, `chromosome` and `tagVariantId` columns.\n\n        Returns:\n            DataFrame: containing `left_studyLocusId`, `right_studyLocusId` and `chromosome` columns.\n        \"\"\"\n        # Reduce columns to the minimum to reduce the size of the dataframe\n        credset_to_overlap = credset_to_overlap.select(\n            \"studyLocusId\", \"studyType\", \"chromosome\", \"tagVariantId\"\n        )\n        return (\n            credset_to_overlap.alias(\"left\")\n            .filter(f.col(\"studyType\") == \"gwas\")\n            # Self join with complex condition. Left it's all gwas and right can be gwas or molecular trait\n            .join(\n                credset_to_overlap.alias(\"right\"),\n                on=[\n                    f.col(\"left.chromosome\") == f.col(\"right.chromosome\"),\n                    f.col(\"left.tagVariantId\") == f.col(\"right.tagVariantId\"),\n                    (f.col(\"right.studyType\") != \"gwas\")\n                    | (f.col(\"left.studyLocusId\") > f.col(\"right.studyLocusId\")),\n                ],\n                how=\"inner\",\n            )\n            .select(\n                f.col(\"left.studyLocusId\").alias(\"left_studyLocusId\"),\n                f.col(\"right.studyLocusId\").alias(\"right_studyLocusId\"),\n                f.col(\"left.chromosome\").alias(\"chromosome\"),\n            )\n            .distinct()\n            .repartition(\"chromosome\")\n            .persist()\n        )\n\n    @staticmethod\n    def _align_overlapping_tags(\n        credset_to_overlap: DataFrame, peak_overlaps: DataFrame\n    ) -> StudyLocusOverlap:\n\"\"\"Align overlapping tags in pairs of overlapping study-locus, keeping all tags in both loci.\n\n        Args:\n            credset_to_overlap (DataFrame): containing `studyLocusId`, `studyType`, `chromosome`, `tagVariantId`, `logABF` and `posteriorProbability` columns.\n            peak_overlaps (DataFrame): containing `left_studyLocusId`, `right_studyLocusId` and `chromosome` columns.\n\n        Returns:\n            StudyLocusOverlap: Pairs of overlapping study-locus with aligned tags.\n        \"\"\"\n        # Complete information about all tags in the left study-locus of the overlap\n        overlapping_left = credset_to_overlap.select(\n            f.col(\"chromosome\"),\n            f.col(\"tagVariantId\"),\n            f.col(\"studyLocusId\").alias(\"left_studyLocusId\"),\n            f.col(\"logABF\").alias(\"left_logABF\"),\n            f.col(\"posteriorProbability\").alias(\"left_posteriorProbability\"),\n        ).join(peak_overlaps, on=[\"chromosome\", \"left_studyLocusId\"], how=\"inner\")\n\n        # Complete information about all tags in the right study-locus of the overlap\n        overlapping_right = credset_to_overlap.select(\n            f.col(\"chromosome\"),\n            f.col(\"tagVariantId\"),\n            f.col(\"studyLocusId\").alias(\"right_studyLocusId\"),\n            
f.col(\"logABF\").alias(\"right_logABF\"),\n            f.col(\"posteriorProbability\").alias(\"right_posteriorProbability\"),\n        ).join(peak_overlaps, on=[\"chromosome\", \"right_studyLocusId\"], how=\"inner\")\n\n        # Include information about all tag variants in both study-locus aligned by tag variant id\n        return StudyLocusOverlap(\n            _df=overlapping_left.join(\n                overlapping_right,\n                on=[\n                    \"chromosome\",\n                    \"right_studyLocusId\",\n                    \"left_studyLocusId\",\n                    \"tagVariantId\",\n                ],\n                how=\"outer\",\n            )\n            # ensures nullable=false for following columns\n            .fillna(\n                value=\"unknown\",\n                subset=[\n                    \"chromosome\",\n                    \"right_studyLocusId\",\n                    \"left_studyLocusId\",\n                    \"tagVariantId\",\n                ],\n            )\n        )\n\n    @staticmethod\n    def _update_quality_flag(\n        qc: Column, flag_condition: Column, flag_text: StudyLocusQualityCheck\n    ) -> Column:\n\"\"\"Update the provided quality control list with a new flag if condition is met.\n\n        Args:\n            qc (Column): Array column with the current list of qc flags.\n            flag_condition (Column): This is a column of booleans, signing which row should be flagged\n            flag_text (StudyLocusQualityCheck): Text for the new quality control flag\n\n        Returns:\n            Column: Array column with the updated list of qc flags.\n        \"\"\"\n        qc = f.when(qc.isNull(), f.array()).otherwise(qc)\n        return f.when(\n            flag_condition,\n            f.array_union(qc, f.array(f.lit(flag_text.value))),\n        ).otherwise(qc)\n\n    @classmethod\n    def from_parquet(cls: type[StudyLocus], session: Session, path: str) -> StudyLocus:\n\"\"\"Initialise StudyLocus from parquet file.\n\n        Args:\n            session (Session): spark session\n            path (str): Path to parquet file\n\n        Returns:\n            StudyLocus: Study-locus dataset\n        \"\"\"\n        df = session.read_parquet(path=path, schema=cls._schema)\n        return cls(_df=df, _schema=cls._schema)\n\n    def credible_set(\n        self: StudyLocus,\n        credible_interval: CredibleInterval,\n    ) -> StudyLocus:\n\"\"\"Filter study-locus tag variants based on given credible interval.\n\n        Args:\n            credible_interval (CredibleInterval): Credible interval to filter for.\n\n        Returns:\n            StudyLocus: Filtered study-locus dataset.\n        \"\"\"\n        self.df = self._df.withColumn(\n            \"credibleSet\",\n            f.expr(f\"filter(credibleSet, tag -> (tag.{credible_interval.value}))\"),\n        )\n        return self\n\n    def overlaps(self: StudyLocus, study_index: StudyIndex) -> StudyLocusOverlap:\n\"\"\"Calculate overlapping study-locus.\n\n        Find overlapping study-locus that share at least one tagging variant. 
All GWAS-GWAS and all GWAS-Molecular traits are computed with the Molecular traits always\n        appearing on the right side.\n\n        Args:\n            study_index (StudyIndex): Study index to resolve study types.\n\n        Returns:\n            StudyLocusOverlap: Pairs of overlapping study-locus with aligned tags.\n        \"\"\"\n        credset_to_overlap = (\n            self.df.join(study_index.study_type_lut(), on=\"studyId\", how=\"inner\")\n            .withColumn(\"credibleSet\", f.explode(\"credibleSet\"))\n            .select(\n                \"studyLocusId\",\n                \"studyType\",\n                \"chromosome\",\n                f.col(\"credibleSet.tagVariantId\").alias(\"tagVariantId\"),\n                f.col(\"credibleSet.logABF\").alias(\"logABF\"),\n                f.col(\"credibleSet.posteriorProbability\").alias(\"posteriorProbability\"),\n            )\n            .persist()\n        )\n\n        # overlapping study-locus\n        peak_overlaps = self._overlapping_peaks(credset_to_overlap)\n\n        # study-locus overlap by aligning overlapping variants\n        return self._align_overlapping_tags(credset_to_overlap, peak_overlaps)\n\n    def unique_lead_tag_variants(self: StudyLocus) -> DataFrame:\n\"\"\"All unique lead and tag variants contained in the `StudyLocus` dataframe.\n\n        Returns:\n            DataFrame: A dataframe containing `variantId` and `chromosome` columns.\n        \"\"\"\n        lead_tags = (\n            self.df.select(\n                f.col(\"variantId\"),\n                f.col(\"chromosome\"),\n                f.explode(\"credibleSet.tagVariantId\").alias(\"tagVariantId\"),\n            )\n            .repartition(\"chromosome\")\n            .persist()\n        )\n        return (\n            lead_tags.select(\"variantId\", \"chromosome\")\n            .union(\n                lead_tags.select(f.col(\"tagVariantId\").alias(\"variantId\"), \"chromosome\")\n            )\n            .distinct()\n        )\n\n    def unique_study_locus_ancestries(\n        self: StudyLocus, studies: StudyIndexGWASCatalog\n    ) -> DataFrame:\n\"\"\"All unique lead variant and ancestries contained in the `StudyLocus`.\n\n        Args:\n            studies (StudyIndexGWASCatalog): Metadata about studies in the `StudyLocus`.\n\n        Returns:\n            DataFrame: unique [\"variantId\", \"studyId\", \"gnomadPopulation\", \"chromosome\", \"relativeSampleSize\"]\n\n        Note:\n            This method is only available for GWAS Catalog studies.\n        \"\"\"\n        return (\n            self.df.join(\n                studies.get_gnomad_ancestry_sample_sizes(), on=\"studyId\", how=\"left\"\n            )\n            .filter(f.col(\"position\").isNotNull())\n            .select(\n                \"variantId\",\n                \"chromosome\",\n                \"studyId\",\n                \"gnomadPopulation\",\n                \"relativeSampleSize\",\n            )\n            .distinct()\n        )\n\n    def neglog_pvalue(self: StudyLocus) -> Column:\n\"\"\"Returns the negative log p-value.\n\n        Returns:\n            Column: Negative log p-value\n        \"\"\"\n        return calculate_neglog_pvalue(\n            self.df.pValueMantissa,\n            self.df.pValueExponent,\n        )\n\n    def annotate_credible_sets(self: StudyLocus) -> StudyLocus:\n\"\"\"Annotate study-locus dataset with credible set flags.\n\n        Sorts the array in the `credibleSet` column elements by their `posteriorProbability` values in 
descending order and adds\n        `is95CredibleSet` and `is99CredibleSet` fields to the elements, indicating which are the tagging variants whose cumulative sum\n        of their `posteriorProbability` values is below 0.95 and 0.99, respectively.\n\n        Returns:\n            StudyLocus: including annotation on `is95CredibleSet` and `is99CredibleSet`.\n        \"\"\"\n        self.df = self.df.withColumn(\n            # Sort credible set by posterior probability in descending order\n            \"credibleSet\",\n            f.when(\n                f.size(f.col(\"credibleSet\")) > 0,\n                order_array_of_structs_by_field(\"credibleSet\", \"posteriorProbability\"),\n            ).when(f.size(f.col(\"credibleSet\")) == 0, f.col(\"credibleSet\")),\n        ).withColumn(\n            # Calculate array of cumulative sums of posterior probabilities to determine which variants are in the 95% and 99% credible sets\n            # and zip the cumulative sums array with the credible set array to add the flags\n            \"credibleSet\",\n            f.when(\n                f.size(f.col(\"credibleSet\")) > 0,\n                f.zip_with(\n                    f.col(\"credibleSet\"),\n                    f.transform(\n                        f.sequence(f.lit(1), f.size(f.col(\"credibleSet\"))),\n                        lambda index: f.aggregate(\n                            f.slice(\n                                # By using `index - 1` we introduce a value of `0.0` in the cumulative sums array. to ensure that the last variant\n                                # that exceeds the 0.95 threshold is included in the cumulative sum, as its probability is necessary to satisfy the threshold.\n                                f.col(\"credibleSet.posteriorProbability\"),\n                                1,\n                                index - 1,\n                            ),\n                            f.lit(0.0),\n                            lambda acc, el: acc + el,\n                        ),\n                    ),\n                    lambda struct_e, acc: struct_e.withField(\n                        CredibleInterval.IS95.value, acc < 0.95\n                    ).withField(CredibleInterval.IS99.value, acc < 0.99),\n                ),\n            ).when(f.size(f.col(\"credibleSet\")) == 0, f.col(\"credibleSet\")),\n        )\n        return self\n\n    def clump(self: StudyLocus) -> StudyLocus:\n\"\"\"Perform LD clumping of the studyLocus.\n\n        Evaluates whether a lead variant is linked to a tag (with lowest p-value) in the same studyLocus dataset.\n\n        Returns:\n            StudyLocus: with empty credible sets for linked variants and QC flag.\n        \"\"\"\n        self.df = (\n            self.df.withColumn(\n                \"is_lead_linked\",\n                LDclumping._is_lead_linked(\n                    self.df.studyId,\n                    self.df.variantId,\n                    self.df.pValueExponent,\n                    self.df.pValueMantissa,\n                    self.df.credibleSet,\n                ),\n            )\n            .withColumn(\n                \"credibleSet\",\n                f.when(f.col(\"is_lead_linked\"), f.array()).otherwise(\n                    f.col(\"credibleSet\")\n                ),\n            )\n            .withColumn(\n                \"qualityControls\",\n                StudyLocus._update_quality_flag(\n                    f.col(\"qualityControls\"),\n                    f.col(\"is_lead_linked\"),\n                    
StudyLocusQualityCheck.LD_CLUMPED,\n                ),\n            )\n            .drop(\"is_lead_linked\")\n        )\n        return self\n
"},{"location":"components/dataset/study_locus/_study_locus/#otg.dataset.study_locus.StudyLocus.annotate_credible_sets","title":"annotate_credible_sets()","text":"

Annotate study-locus dataset with credible set flags.

Sorts the array in the credibleSet column elements by their posteriorProbability values in descending order and adds is95CredibleSet and is99CredibleSet fields to the elements, indicating which are the tagging variants whose cumulative sum of their posteriorProbability values is below 0.95 and 0.99, respectively.

Returns:

  • StudyLocus: including annotation on is95CredibleSet and is99CredibleSet.

Source code in src/otg/dataset/study_locus.py
def annotate_credible_sets(self: StudyLocus) -> StudyLocus:\n\"\"\"Annotate study-locus dataset with credible set flags.\n\n    Sorts the array in the `credibleSet` column elements by their `posteriorProbability` values in descending order and adds\n    `is95CredibleSet` and `is99CredibleSet` fields to the elements, indicating which are the tagging variants whose cumulative sum\n    of their `posteriorProbability` values is below 0.95 and 0.99, respectively.\n\n    Returns:\n        StudyLocus: including annotation on `is95CredibleSet` and `is99CredibleSet`.\n    \"\"\"\n    self.df = self.df.withColumn(\n        # Sort credible set by posterior probability in descending order\n        \"credibleSet\",\n        f.when(\n            f.size(f.col(\"credibleSet\")) > 0,\n            order_array_of_structs_by_field(\"credibleSet\", \"posteriorProbability\"),\n        ).when(f.size(f.col(\"credibleSet\")) == 0, f.col(\"credibleSet\")),\n    ).withColumn(\n        # Calculate array of cumulative sums of posterior probabilities to determine which variants are in the 95% and 99% credible sets\n        # and zip the cumulative sums array with the credible set array to add the flags\n        \"credibleSet\",\n        f.when(\n            f.size(f.col(\"credibleSet\")) > 0,\n            f.zip_with(\n                f.col(\"credibleSet\"),\n                f.transform(\n                    f.sequence(f.lit(1), f.size(f.col(\"credibleSet\"))),\n                    lambda index: f.aggregate(\n                        f.slice(\n                            # By using `index - 1` we introduce a value of `0.0` in the cumulative sums array. to ensure that the last variant\n                            # that exceeds the 0.95 threshold is included in the cumulative sum, as its probability is necessary to satisfy the threshold.\n                            f.col(\"credibleSet.posteriorProbability\"),\n                            1,\n                            index - 1,\n                        ),\n                        f.lit(0.0),\n                        lambda acc, el: acc + el,\n                    ),\n                ),\n                lambda struct_e, acc: struct_e.withField(\n                    CredibleInterval.IS95.value, acc < 0.95\n                ).withField(CredibleInterval.IS99.value, acc < 0.99),\n            ),\n        ).when(f.size(f.col(\"credibleSet\")) == 0, f.col(\"credibleSet\")),\n    )\n    return self\n
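For intuition: a credible set with posterior probabilities [0.6, 0.3, 0.05, 0.05] (already in descending order) produces the shifted cumulative sums [0.0, 0.6, 0.9, 0.95]. The is95CredibleSet flag (cumulative sum < 0.95) therefore marks the first three variants, whose probabilities sum to 0.95, while is99CredibleSet (cumulative sum < 0.99) marks all four.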
"},{"location":"components/dataset/study_locus/_study_locus/#otg.dataset.study_locus.StudyLocus.clump","title":"clump()","text":"

Perform LD clumping of the studyLocus.

Evaluates whether a lead variant is linked to a tag variant (with the lowest p-value) in the same studyLocus dataset.

Returns:

  • StudyLocus: with empty credible sets for linked variants and QC flag.

Source code in src/otg/dataset/study_locus.py
def clump(self: StudyLocus) -> StudyLocus:\n\"\"\"Perform LD clumping of the studyLocus.\n\n    Evaluates whether a lead variant is linked to a tag (with lowest p-value) in the same studyLocus dataset.\n\n    Returns:\n        StudyLocus: with empty credible sets for linked variants and QC flag.\n    \"\"\"\n    self.df = (\n        self.df.withColumn(\n            \"is_lead_linked\",\n            LDclumping._is_lead_linked(\n                self.df.studyId,\n                self.df.variantId,\n                self.df.pValueExponent,\n                self.df.pValueMantissa,\n                self.df.credibleSet,\n            ),\n        )\n        .withColumn(\n            \"credibleSet\",\n            f.when(f.col(\"is_lead_linked\"), f.array()).otherwise(\n                f.col(\"credibleSet\")\n            ),\n        )\n        .withColumn(\n            \"qualityControls\",\n            StudyLocus._update_quality_flag(\n                f.col(\"qualityControls\"),\n                f.col(\"is_lead_linked\"),\n                StudyLocusQualityCheck.LD_CLUMPED,\n            ),\n        )\n        .drop(\"is_lead_linked\")\n    )\n    return self\n
"},{"location":"components/dataset/study_locus/_study_locus/#otg.dataset.study_locus.StudyLocus.credible_set","title":"credible_set(credible_interval)","text":"

Filter study-locus tag variants based on given credible interval.

Parameters:

  • credible_interval (CredibleInterval, required): Credible interval to filter for.

Returns:

  • StudyLocus: Filtered study-locus dataset.

Source code in src/otg/dataset/study_locus.py
def credible_set(\n    self: StudyLocus,\n    credible_interval: CredibleInterval,\n) -> StudyLocus:\n\"\"\"Filter study-locus tag variants based on given credible interval.\n\n    Args:\n        credible_interval (CredibleInterval): Credible interval to filter for.\n\n    Returns:\n        StudyLocus: Filtered study-locus dataset.\n    \"\"\"\n    self.df = self._df.withColumn(\n        \"credibleSet\",\n        f.expr(f\"filter(credibleSet, tag -> (tag.{credible_interval.value}))\"),\n    )\n    return self\n
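For example, to keep only the tag variants that fall in the 95% credible set (a usage sketch; `study_locus` is assumed to be an existing StudyLocus instance):

```python
study_locus_95 = study_locus.credible_set(CredibleInterval.IS95)
```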
"},{"location":"components/dataset/study_locus/_study_locus/#otg.dataset.study_locus.StudyLocus.from_parquet","title":"from_parquet(session, path) classmethod","text":"

Initialise StudyLocus from parquet file.

Parameters:

  • session (Session, required): Spark session
  • path (str, required): Path to parquet file

Returns:

  • StudyLocus: Study-locus dataset

Source code in src/otg/dataset/study_locus.py
@classmethod\ndef from_parquet(cls: type[StudyLocus], session: Session, path: str) -> StudyLocus:\n\"\"\"Initialise StudyLocus from parquet file.\n\n    Args:\n        session (Session): spark session\n        path (str): Path to parquet file\n\n    Returns:\n        StudyLocus: Study-locus dataset\n    \"\"\"\n    df = session.read_parquet(path=path, schema=cls._schema)\n    return cls(_df=df, _schema=cls._schema)\n
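A usage sketch, assuming the pipeline's `Session` wrapper is already constructed and with an illustrative output path:

```python
study_locus = StudyLocus.from_parquet(session, "gs://bucket/outputs/study_locus")
```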
"},{"location":"components/dataset/study_locus/_study_locus/#otg.dataset.study_locus.StudyLocus.neglog_pvalue","title":"neglog_pvalue()","text":"

Returns the negative log p-value.

Returns:

  • Column: Negative log p-value

Source code in src/otg/dataset/study_locus.py
def neglog_pvalue(self: StudyLocus) -> Column:\n\"\"\"Returns the negative log p-value.\n\n    Returns:\n        Column: Negative log p-value\n    \"\"\"\n    return calculate_neglog_pvalue(\n        self.df.pValueMantissa,\n        self.df.pValueExponent,\n    )\n
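For intuition: a p-value stored as mantissa 3.0 and exponent -8 (i.e. 3×10⁻⁸) yields -log10(3×10⁻⁸) ≈ 7.52.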
"},{"location":"components/dataset/study_locus/_study_locus/#otg.dataset.study_locus.StudyLocus.overlaps","title":"overlaps(study_index)","text":"

Calculate overlapping study-locus.

Find overlapping study-locus pairs that share at least one tagging variant. All GWAS-GWAS and GWAS-molecular trait overlaps are computed, with the molecular traits always appearing on the right side.

Parameters:

  • study_index (StudyIndex, required): Study index to resolve study types.

Returns:

  • StudyLocusOverlap: Pairs of overlapping study-locus with aligned tags.

Source code in src/otg/dataset/study_locus.py
def overlaps(self: StudyLocus, study_index: StudyIndex) -> StudyLocusOverlap:\n\"\"\"Calculate overlapping study-locus.\n\n    Find overlapping study-locus that share at least one tagging variant. All GWAS-GWAS and all GWAS-Molecular traits are computed with the Molecular traits always\n    appearing on the right side.\n\n    Args:\n        study_index (StudyIndex): Study index to resolve study types.\n\n    Returns:\n        StudyLocusOverlap: Pairs of overlapping study-locus with aligned tags.\n    \"\"\"\n    credset_to_overlap = (\n        self.df.join(study_index.study_type_lut(), on=\"studyId\", how=\"inner\")\n        .withColumn(\"credibleSet\", f.explode(\"credibleSet\"))\n        .select(\n            \"studyLocusId\",\n            \"studyType\",\n            \"chromosome\",\n            f.col(\"credibleSet.tagVariantId\").alias(\"tagVariantId\"),\n            f.col(\"credibleSet.logABF\").alias(\"logABF\"),\n            f.col(\"credibleSet.posteriorProbability\").alias(\"posteriorProbability\"),\n        )\n        .persist()\n    )\n\n    # overlapping study-locus\n    peak_overlaps = self._overlapping_peaks(credset_to_overlap)\n\n    # study-locus overlap by aligning overlapping variants\n    return self._align_overlapping_tags(credset_to_overlap, peak_overlaps)\n
"},{"location":"components/dataset/study_locus/_study_locus/#otg.dataset.study_locus.StudyLocus.unique_lead_tag_variants","title":"unique_lead_tag_variants()","text":"

All unique lead and tag variants contained in the StudyLocus dataframe.

Returns:

  • DataFrame: A dataframe containing variantId and chromosome columns.

Source code in src/otg/dataset/study_locus.py
def unique_lead_tag_variants(self: StudyLocus) -> DataFrame:\n\"\"\"All unique lead and tag variants contained in the `StudyLocus` dataframe.\n\n    Returns:\n        DataFrame: A dataframe containing `variantId` and `chromosome` columns.\n    \"\"\"\n    lead_tags = (\n        self.df.select(\n            f.col(\"variantId\"),\n            f.col(\"chromosome\"),\n            f.explode(\"credibleSet.tagVariantId\").alias(\"tagVariantId\"),\n        )\n        .repartition(\"chromosome\")\n        .persist()\n    )\n    return (\n        lead_tags.select(\"variantId\", \"chromosome\")\n        .union(\n            lead_tags.select(f.col(\"tagVariantId\").alias(\"variantId\"), \"chromosome\")\n        )\n        .distinct()\n    )\n
"},{"location":"components/dataset/study_locus/_study_locus/#otg.dataset.study_locus.StudyLocus.unique_study_locus_ancestries","title":"unique_study_locus_ancestries(studies)","text":"

All unique lead variants and ancestries contained in the StudyLocus.

Parameters:

  • studies (StudyIndexGWASCatalog, required): Metadata about studies in the StudyLocus.

Returns:

  • DataFrame: unique ["variantId", "studyId", "gnomadPopulation", "chromosome", "relativeSampleSize"]

Note

This method is only available for GWAS Catalog studies.

Source code in src/otg/dataset/study_locus.py
def unique_study_locus_ancestries(\n    self: StudyLocus, studies: StudyIndexGWASCatalog\n) -> DataFrame:\n\"\"\"All unique lead variant and ancestries contained in the `StudyLocus`.\n\n    Args:\n        studies (StudyIndexGWASCatalog): Metadata about studies in the `StudyLocus`.\n\n    Returns:\n        DataFrame: unique [\"variantId\", \"studyId\", \"gnomadPopulation\", \"chromosome\", \"relativeSampleSize\"]\n\n    Note:\n        This method is only available for GWAS Catalog studies.\n    \"\"\"\n    return (\n        self.df.join(\n            studies.get_gnomad_ancestry_sample_sizes(), on=\"studyId\", how=\"left\"\n        )\n        .filter(f.col(\"position\").isNotNull())\n        .select(\n            \"variantId\",\n            \"chromosome\",\n            \"studyId\",\n            \"gnomadPopulation\",\n            \"relativeSampleSize\",\n        )\n        .distinct()\n    )\n
"},{"location":"components/dataset/study_locus/_study_locus/#schema","title":"Schema","text":"
root\n |-- studyLocusId: long (nullable = false)\n |-- variantId: string (nullable = false)\n |-- chromosome: string (nullable = true)\n |-- position: integer (nullable = true)\n |-- studyId: string (nullable = false)\n |-- beta: double (nullable = true)\n |-- oddsRatio: double (nullable = true)\n |-- oddsRatioConfidenceIntervalLower: double (nullable = true)\n |-- oddsRatioConfidenceIntervalUpper: double (nullable = true)\n |-- betaConfidenceIntervalLower: double (nullable = true)\n |-- betaConfidenceIntervalUpper: double (nullable = true)\n |-- pValueMantissa: float (nullable = true)\n |-- pValueExponent: integer (nullable = true)\n |-- effectAlleleFrequencyFromSource: float (nullable = true)\n |-- standardError: double (nullable = true)\n |-- subStudyDescription: string (nullable = true)\n |-- qualityControls: array (nullable = true)\n |    |-- element: string (containsNull = false)\n |-- finemappingMethod: string (nullable = true)\n |-- credibleSet: array (nullable = true)\n |    |-- element: struct (containsNull = true)\n |    |    |-- is95CredibleSet: boolean (nullable = true)\n |    |    |-- is99CredibleSet: boolean (nullable = true)\n |    |    |-- logABF: double (nullable = true)\n |    |    |-- posteriorProbability: double (nullable = true)\n |    |    |-- tagVariantId: string (nullable = true)\n |    |    |-- tagPValue: double (nullable = true)\n |    |    |-- tagPValueConditioned: double (nullable = true)\n |    |    |-- tagBeta: double (nullable = true)\n |    |    |-- tagStandardError: double (nullable = true)\n |    |    |-- tagBetaConditioned: double (nullable = true)\n |    |    |-- tagStandardErrorConditioned: double (nullable = true)\n |    |    |-- r2Overall: double (nullable = true)\n
"},{"location":"components/dataset/study_locus/_study_locus/#study-locus-quality-controls","title":"Study-locus quality controls","text":"

Bases: Enum

Study-Locus quality control options listing concerns about the quality of the association.

Attributes:

  • SUBSIGNIFICANT_FLAG (str): p-value below significance threshold
  • NO_GENOMIC_LOCATION_FLAG (str): Incomplete genomic mapping
  • COMPOSITE_FLAG (str): Composite association due to variant x variant interactions
  • VARIANT_INCONSISTENCY_FLAG (str): Inconsistencies in the reported variants
  • NON_MAPPED_VARIANT_FLAG (str): Variant not mapped to gnomAD
  • PALINDROMIC_ALLELE_FLAG (str): Alleles are palindromic - cannot harmonize
  • AMBIGUOUS_STUDY (str): Association with ambiguous study
  • UNRESOLVED_LD (str): Variant not found in LD reference
  • LD_CLUMPED (str): Explained by a more significant variant in high LD (clumped)

Source code in src/otg/dataset/study_locus.py
class StudyLocusQualityCheck(Enum):\n\"\"\"Study-Locus quality control options listing concerns on the quality of the association.\n\n    Attributes:\n        SUBSIGNIFICANT_FLAG (str): p-value below significance threshold\n        NO_GENOMIC_LOCATION_FLAG (str): Incomplete genomic mapping\n        COMPOSITE_FLAG (str): Composite association due to variant x variant interactions\n        VARIANT_INCONSISTENCY_FLAG (str): Inconsistencies in the reported variants\n        NON_MAPPED_VARIANT_FLAG (str): Variant not mapped to GnomAd\n        PALINDROMIC_ALLELE_FLAG (str): Alleles are palindromic - cannot harmonize\n        AMBIGUOUS_STUDY (str): Association with ambiguous study\n        UNRESOLVED_LD (str): Variant not found in LD reference\n        LD_CLUMPED (str): Explained by a more significant variant in high LD (clumped)\n    \"\"\"\n\n    SUBSIGNIFICANT_FLAG = \"Subsignificant p-value\"\n    NO_GENOMIC_LOCATION_FLAG = \"Incomplete genomic mapping\"\n    COMPOSITE_FLAG = \"Composite association\"\n    INCONSISTENCY_FLAG = \"Variant inconsistency\"\n    NON_MAPPED_VARIANT_FLAG = \"No mapping in GnomAd\"\n    PALINDROMIC_ALLELE_FLAG = \"Palindrome alleles - cannot harmonize\"\n    AMBIGUOUS_STUDY = \"Association with ambiguous study\"\n    UNRESOLVED_LD = \"Variant not found in LD reference\"\n    LD_CLUMPED = \"Explained by a more significant variant in high LD (clumped)\"\n
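A sketch of how these flags are applied in practice, reusing the `StudyLocus._update_quality_flag` helper shown earlier; the condition and variable names here are illustrative:

```python
import pyspark.sql.functions as f

flagged_df = study_locus.df.withColumn(
    "qualityControls",
    StudyLocus._update_quality_flag(
        f.col("qualityControls"),
        f.col("position").isNull(),  # flag associations without a genomic position
        StudyLocusQualityCheck.NO_GENOMIC_LOCATION_FLAG,
    ),
)
```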
"},{"location":"components/dataset/study_locus/_study_locus/#credible-interval","title":"Credible interval","text":"

Bases: Enum

Credible interval enum.

Interval within which an unobserved parameter value falls with a particular probability.

Attributes:

  • IS95 (str): 95% credible interval
  • IS99 (str): 99% credible interval

Source code in src/otg/dataset/study_locus.py
class CredibleInterval(Enum):\n\"\"\"Credible interval enum.\n\n    Interval within which an unobserved parameter value falls with a particular probability.\n\n    Attributes:\n        IS95 (str): 95% credible interval\n        IS99 (str): 99% credible interval\n    \"\"\"\n\n    IS95 = \"is95CredibleSet\"\n    IS99 = \"is99CredibleSet\"\n
"},{"location":"components/dataset/study_locus/study_locus_gwas_catalog/","title":"Study locus gwas catalog","text":"

Bases: StudyLocus

Study-locus dataset derived from GWAS Catalog.

Source code in src/otg/dataset/study_locus.py
class StudyLocusGWASCatalog(StudyLocus):\n\"\"\"Study-locus dataset derived from GWAS Catalog.\"\"\"\n\n    @staticmethod\n    def _parse_pvalue(pvalue: Column) -> tuple[Column, Column]:\n\"\"\"Parse p-value column.\n\n        Args:\n            pvalue (Column): p-value [string]\n\n        Returns:\n            tuple[Column, Column]: p-value mantissa and exponent\n\n        Example:\n            >>> import pyspark.sql.types as t\n            >>> d = [(\"1.0\"), (\"0.5\"), (\"1E-20\"), (\"3E-3\"), (\"1E-1000\")]\n            >>> df = spark.createDataFrame(d, t.StringType())\n            >>> df.select('value',*StudyLocusGWASCatalog._parse_pvalue(f.col('value'))).show()\n            +-------+--------------+--------------+\n            |  value|pValueMantissa|pValueExponent|\n            +-------+--------------+--------------+\n            |    1.0|           1.0|             1|\n            |    0.5|           0.5|             1|\n            |  1E-20|           1.0|           -20|\n            |   3E-3|           3.0|            -3|\n            |1E-1000|           1.0|         -1000|\n            +-------+--------------+--------------+\n            <BLANKLINE>\n\n        \"\"\"\n        split = f.split(pvalue, \"E\")\n        return split.getItem(0).cast(\"float\").alias(\"pValueMantissa\"), f.coalesce(\n            split.getItem(1).cast(\"integer\"), f.lit(1)\n        ).alias(\"pValueExponent\")\n\n    @staticmethod\n    def _normalise_pvaluetext(p_value_text: Column) -> Column:\n\"\"\"Normalised p-value text column to a standardised format.\n\n        For cases where there is no mapping, the value is set to null.\n\n        Args:\n            p_value_text (Column): `pValueText` column from GWASCatalog\n\n        Returns:\n            Column: Array column after using GWAS Catalog mappings. 
There might be multiple mappings for a single p-value text.\n\n        Example:\n            >>> import pyspark.sql.types as t\n            >>> d = [(\"European Ancestry\"), (\"African ancestry\"), (\"Alzheimer\u2019s Disease\"), (\"(progression)\"), (\"\"), (None)]\n            >>> df = spark.createDataFrame(d, t.StringType())\n            >>> df.withColumn('normalised', StudyLocusGWASCatalog._normalise_pvaluetext(f.col('value'))).show()\n            +-------------------+----------+\n            |              value|normalised|\n            +-------------------+----------+\n            |  European Ancestry|      [EA]|\n            |   African ancestry|      [AA]|\n            |Alzheimer\u2019s Disease|      [AD]|\n            |      (progression)|      null|\n            |                   |      null|\n            |               null|      null|\n            +-------------------+----------+\n            <BLANKLINE>\n\n        \"\"\"\n        # GWAS Catalog to p-value mapping\n        json_dict = json.loads(\n            pkg_resources.read_text(data, \"gwas_pValueText_map.json\", encoding=\"utf-8\")\n        )\n        map_expr = f.create_map(*[f.lit(x) for x in chain(*json_dict.items())])\n\n        splitted_col = f.split(f.regexp_replace(p_value_text, r\"[\\(\\)]\", \"\"), \",\")\n        mapped_col = f.transform(splitted_col, lambda x: map_expr[x])\n        return f.when(f.forall(mapped_col, lambda x: x.isNull()), None).otherwise(\n            mapped_col\n        )\n\n    @staticmethod\n    def _normalise_risk_allele(risk_allele: Column) -> Column:\n\"\"\"Normalised risk allele column to a standardised format.\n\n        If multiple risk alleles are present, the first one is returned.\n\n        Args:\n            risk_allele (Column): `riskAllele` column from GWASCatalog\n\n        Returns:\n            Column: mapped using GWAS Catalog mapping\n\n        Example:\n            >>> import pyspark.sql.types as t\n            >>> d = [(\"rs1234-A-G\"), (\"rs1234-A\"), (\"rs1234-A; rs1235-G\")]\n            >>> df = spark.createDataFrame(d, t.StringType())\n            >>> df.withColumn('normalised', StudyLocusGWASCatalog._normalise_risk_allele(f.col('value'))).show()\n            +------------------+----------+\n            |             value|normalised|\n            +------------------+----------+\n            |        rs1234-A-G|         A|\n            |          rs1234-A|         A|\n            |rs1234-A; rs1235-G|         A|\n            +------------------+----------+\n            <BLANKLINE>\n\n        \"\"\"\n        # GWAS Catalog to risk allele mapping\n        return f.split(f.split(risk_allele, \"; \").getItem(0), \"-\").getItem(1)\n\n    @staticmethod\n    def _collect_rsids(\n        snp_id: Column, snp_id_current: Column, risk_allele: Column\n    ) -> Column:\n\"\"\"It takes three columns, and returns an array of distinct values from those columns.\n\n        Args:\n            snp_id (Column): The original snp id from the GWAS catalog.\n            snp_id_current (Column): The current snp id field is just a number at the moment (stored as a string). Adding 'rs' prefix if looks good.\n            risk_allele (Column): The risk allele for the SNP.\n\n        Returns:\n            An array of distinct values.\n        \"\"\"\n        # The current snp id field is just a number at the moment (stored as a string). 
Adding 'rs' prefix if looks good.\n        snp_id_current = f.when(\n            snp_id_current.rlike(\"^[0-9]*$\"),\n            f.format_string(\"rs%s\", snp_id_current),\n        )\n        # Cleaning risk allele:\n        risk_allele = f.split(risk_allele, \"-\").getItem(0)\n\n        # Collecting all values:\n        return f.array_distinct(f.array(snp_id, snp_id_current, risk_allele))\n\n    @staticmethod\n    def _map_to_variant_annotation_variants(\n        gwas_associations: DataFrame, variant_annotation: VariantAnnotation\n    ) -> DataFrame:\n\"\"\"Add variant metadata in associations.\n\n        Args:\n            gwas_associations (DataFrame): raw GWAS Catalog associations\n            variant_annotation (VariantAnnotation): variant annotation dataset\n\n        Returns:\n            DataFrame: GWAS Catalog associations data including `variantId`, `referenceAllele`,\n            `alternateAllele`, `chromosome`, `position` with variant metadata\n        \"\"\"\n        # Subset of GWAS Catalog associations required for resolving variant IDs:\n        gwas_associations_subset = gwas_associations.select(\n            \"studyLocusId\",\n            f.col(\"CHR_ID\").alias(\"chromosome\"),\n            f.col(\"CHR_POS\").cast(IntegerType()).alias(\"position\"),\n            # List of all SNPs associated with the variant\n            StudyLocusGWASCatalog._collect_rsids(\n                f.split(f.col(\"SNPS\"), \"; \").getItem(0),\n                f.col(\"SNP_ID_CURRENT\"),\n                f.split(f.col(\"STRONGEST SNP-RISK ALLELE\"), \"; \").getItem(0),\n            ).alias(\"rsIdsGwasCatalog\"),\n            StudyLocusGWASCatalog._normalise_risk_allele(\n                f.col(\"STRONGEST SNP-RISK ALLELE\")\n            ).alias(\"riskAllele\"),\n        )\n\n        # Subset of variant annotation required for GWAS Catalog annotations:\n        va_subset = variant_annotation.df.select(\n            \"variantId\",\n            \"chromosome\",\n            \"position\",\n            f.col(\"rsIds\").alias(\"rsIdsGnomad\"),\n            \"referenceAllele\",\n            \"alternateAllele\",\n            \"alleleFrequencies\",\n            variant_annotation.max_maf().alias(\"maxMaf\"),\n        ).join(\n            f.broadcast(\n                gwas_associations_subset.select(\"chromosome\", \"position\").distinct()\n            ),\n            on=[\"chromosome\", \"position\"],\n            how=\"inner\",\n        )\n\n        # Semi-resolved ids (still contains duplicates when conclusion was not possible to make\n        # based on rsIds or allele concordance)\n        filtered_associations = (\n            gwas_associations_subset.join(\n                f.broadcast(va_subset),\n                on=[\"chromosome\", \"position\"],\n                how=\"left\",\n            )\n            .withColumn(\n                \"rsIdFilter\",\n                StudyLocusGWASCatalog._flag_mappings_to_retain(\n                    f.col(\"studyLocusId\"),\n                    StudyLocusGWASCatalog._compare_rsids(\n                        f.col(\"rsIdsGnomad\"), f.col(\"rsIdsGwasCatalog\")\n                    ),\n                ),\n            )\n            .withColumn(\n                \"concordanceFilter\",\n                StudyLocusGWASCatalog._flag_mappings_to_retain(\n                    f.col(\"studyLocusId\"),\n                    StudyLocusGWASCatalog._check_concordance(\n                        f.col(\"riskAllele\"),\n                        f.col(\"referenceAllele\"),\n               
         f.col(\"alternateAllele\"),\n                    ),\n                ),\n            )\n            .filter(\n                # Filter out rows where GWAS Catalog rsId does not match with GnomAD rsId,\n                # but there is corresponding variant for the same association\n                f.col(\"rsIdFilter\")\n                # or filter out rows where GWAS Catalog alleles are not concordant with GnomAD alleles,\n                # but there is corresponding variant for the same association\n                | f.col(\"concordanceFilter\")\n            )\n        )\n\n        # Keep only highest maxMaf variant per studyLocusId\n        fully_mapped_associations = get_record_with_maximum_value(\n            filtered_associations, grouping_col=\"studyLocusId\", sorting_col=\"maxMaf\"\n        ).select(\n            \"studyLocusId\",\n            \"variantId\",\n            \"referenceAllele\",\n            \"alternateAllele\",\n            \"chromosome\",\n            \"position\",\n        )\n\n        return gwas_associations.join(\n            fully_mapped_associations, on=\"studyLocusId\", how=\"left\"\n        )\n\n    @staticmethod\n    def _compare_rsids(gnomad: Column, gwas: Column) -> Column:\n\"\"\"If the intersection of the two arrays is greater than 0, return True, otherwise return False.\n\n        Args:\n            gnomad (Column): rsids from gnomad\n            gwas (Column): rsids from the GWAS Catalog\n\n        Returns:\n            A boolean column that is true if the GnomAD rsIDs can be found in the GWAS rsIDs.\n\n        Examples:\n            >>> d = [\n            ...    (1, [\"rs123\", \"rs523\"], [\"rs123\"]),\n            ...    (2, [], [\"rs123\"]),\n            ...    (3, [\"rs123\", \"rs523\"], []),\n            ...    (4, [], []),\n            ... ]\n            >>> df = spark.createDataFrame(d, ['associationId', 'gnomad', 'gwas'])\n            >>> df.withColumn(\"rsid_matches\", StudyLocusGWASCatalog._compare_rsids(f.col(\"gnomad\"),f.col('gwas'))).show()\n            +-------------+--------------+-------+------------+\n            |associationId|        gnomad|   gwas|rsid_matches|\n            +-------------+--------------+-------+------------+\n            |            1|[rs123, rs523]|[rs123]|        true|\n            |            2|            []|[rs123]|       false|\n            |            3|[rs123, rs523]|     []|       false|\n            |            4|            []|     []|       false|\n            +-------------+--------------+-------+------------+\n            <BLANKLINE>\n\n        \"\"\"\n        return f.when(f.size(f.array_intersect(gnomad, gwas)) > 0, True).otherwise(\n            False\n        )\n\n    @staticmethod\n    def _flag_mappings_to_retain(\n        association_id: Column, filter_column: Column\n    ) -> Column:\n\"\"\"Flagging mappings to drop for each association.\n\n        Some associations have multiple mappings. Some has matching rsId others don't. We only\n        want to drop the non-matching mappings, when a matching is available for the given association.\n        This logic can be generalised for other measures eg. allele concordance.\n\n        Args:\n            association_id (Column): association identifier column\n            filter_column (Column): boolean col indicating to keep a mapping\n\n        Returns:\n            A column with a boolean value.\n\n        Examples:\n        >>> d = [\n        ...    (1, False),\n        ...    (1, False),\n        ...    (2, False),\n        ...    
(2, True),\n        ...    (3, True),\n        ...    (3, True),\n        ... ]\n        >>> df = spark.createDataFrame(d, ['associationId', 'filter'])\n        >>> df.withColumn(\"isConcordant\", StudyLocusGWASCatalog._flag_mappings_to_retain(f.col(\"associationId\"),f.col('filter'))).show()\n        +-------------+------+------------+\n        |associationId|filter|isConcordant|\n        +-------------+------+------------+\n        |            1| false|        true|\n        |            1| false|        true|\n        |            2| false|       false|\n        |            2|  true|        true|\n        |            3|  true|        true|\n        |            3|  true|        true|\n        +-------------+------+------------+\n        <BLANKLINE>\n\n        \"\"\"\n        w = Window.partitionBy(association_id)\n\n        # Generating a boolean column informing if the filter column contains true anywhere for the association:\n        aggregated_filter = f.when(\n            f.array_contains(f.collect_set(filter_column).over(w), True), True\n        ).otherwise(False)\n\n        # Generate a filter column:\n        return f.when(aggregated_filter & (~filter_column), False).otherwise(True)\n\n    @staticmethod\n    def _check_concordance(\n        risk_allele: Column, reference_allele: Column, alternate_allele: Column\n    ) -> Column:\n\"\"\"A function to check if the risk allele is concordant with the alt or ref allele.\n\n        If the risk allele is the same as the reference or alternate allele, or if the reverse complement of\n        the risk allele is the same as the reference or alternate allele, then the allele is concordant.\n        If no mapping is available (ref/alt is null), the function returns True.\n\n        Args:\n            risk_allele (Column): The allele that is associated with the risk of the disease.\n            reference_allele (Column): The reference allele from the GWAS catalog\n            alternate_allele (Column): The alternate allele of the variant.\n\n        Returns:\n            A boolean column that is True if the risk allele is the same as the reference or alternate allele,\n            or if the reverse complement of the risk allele is the same as the reference or alternate allele.\n\n        Examples:\n            >>> d = [\n            ...     ('A', 'A', 'G'),\n            ...     ('A', 'T', 'G'),\n            ...     ('A', 'C', 'G'),\n            ...     ('A', 'A', '?'),\n            ...     (None, None, 'A'),\n            ... 
]\n            >>> df = spark.createDataFrame(d, ['riskAllele', 'referenceAllele', 'alternateAllele'])\n            >>> df.withColumn(\"isConcordant\", StudyLocusGWASCatalog._check_concordance(f.col(\"riskAllele\"),f.col('referenceAllele'), f.col('alternateAllele'))).show()\n            +----------+---------------+---------------+------------+\n            |riskAllele|referenceAllele|alternateAllele|isConcordant|\n            +----------+---------------+---------------+------------+\n            |         A|              A|              G|        true|\n            |         A|              T|              G|        true|\n            |         A|              C|              G|       false|\n            |         A|              A|              ?|        true|\n            |      null|           null|              A|        true|\n            +----------+---------------+---------------+------------+\n            <BLANKLINE>\n\n        \"\"\"\n        # Calculating the reverse complement of the risk allele:\n        risk_allele_reverse_complement = f.when(\n            risk_allele.rlike(r\"^[ACTG]+$\"),\n            f.reverse(f.translate(risk_allele, \"ACTG\", \"TGAC\")),\n        ).otherwise(risk_allele)\n\n        # OK, is the risk allele or the reverse complent is the same as the mapped alleles:\n        return (\n            f.when(\n                (risk_allele == reference_allele) | (risk_allele == alternate_allele),\n                True,\n            )\n            # If risk allele is found on the negative strand:\n            .when(\n                (risk_allele_reverse_complement == reference_allele)\n                | (risk_allele_reverse_complement == alternate_allele),\n                True,\n            )\n            # If risk allele is ambiguous, still accepted: < This condition could be reconsidered\n            .when(risk_allele == \"?\", True)\n            # If the association could not be mapped we keep it:\n            .when(reference_allele.isNull(), True)\n            # Allele is discordant:\n            .otherwise(False)\n        )\n\n    @staticmethod\n    def _get_reverse_complement(allele_col: Column) -> Column:\n\"\"\"A function to return the reverse complement of an allele column.\n\n        It takes a string and returns the reverse complement of that string if it's a DNA sequence,\n        otherwise it returns the original string. 
Assumes alleles in upper case.\n\n        Args:\n            allele_col (Column): The column containing the allele to reverse complement.\n\n        Returns:\n            A column that is the reverse complement of the allele column.\n\n        Examples:\n            >>> d = [{\"allele\": 'A'}, {\"allele\": 'T'},{\"allele\": 'G'}, {\"allele\": 'C'},{\"allele\": 'AC'}, {\"allele\": 'GTaatc'},{\"allele\": '?'}, {\"allele\": None}]\n            >>> df = spark.createDataFrame(d)\n            >>> df.withColumn(\"revcom_allele\", StudyLocusGWASCatalog._get_reverse_complement(f.col(\"allele\"))).show()\n            +------+-------------+\n            |allele|revcom_allele|\n            +------+-------------+\n            |     A|            T|\n            |     T|            A|\n            |     G|            C|\n            |     C|            G|\n            |    AC|           GT|\n            |GTaatc|       GATTAC|\n            |     ?|            ?|\n            |  null|         null|\n            +------+-------------+\n            <BLANKLINE>\n\n        \"\"\"\n        allele_col = f.upper(allele_col)\n        return f.when(\n            allele_col.rlike(\"[ACTG]+\"),\n            f.reverse(f.translate(allele_col, \"ACTG\", \"TGAC\")),\n        ).otherwise(allele_col)\n\n    @staticmethod\n    def _effect_needs_harmonisation(\n        risk_allele: Column, reference_allele: Column\n    ) -> Column:\n\"\"\"A function to check if the effect allele needs to be harmonised.\n\n        Args:\n            risk_allele (Column): Risk allele column\n            reference_allele (Column): Effect allele column\n\n        Returns:\n            A boolean column indicating if the effect allele needs to be harmonised.\n\n        Examples:\n            >>> d = [{\"risk\": 'A', \"reference\": 'A'}, {\"risk\": 'A', \"reference\": 'T'}, {\"risk\": 'AT', \"reference\": 'TA'}, {\"risk\": 'AT', \"reference\": 'AT'}]\n            >>> df = spark.createDataFrame(d)\n            >>> df.withColumn(\"needs_harmonisation\", StudyLocusGWASCatalog._effect_needs_harmonisation(f.col(\"risk\"), f.col(\"reference\"))).show()\n            +---------+----+-------------------+\n            |reference|risk|needs_harmonisation|\n            +---------+----+-------------------+\n            |        A|   A|               true|\n            |        T|   A|               true|\n            |       TA|  AT|              false|\n            |       AT|  AT|               true|\n            +---------+----+-------------------+\n            <BLANKLINE>\n\n        \"\"\"\n        return (risk_allele == reference_allele) | (\n            risk_allele\n            == StudyLocusGWASCatalog._get_reverse_complement(reference_allele)\n        )\n\n    @staticmethod\n    def _are_alleles_palindromic(\n        reference_allele: Column, alternate_allele: Column\n    ) -> Column:\n\"\"\"A function to check if the alleles are palindromic.\n\n        Args:\n            reference_allele (Column): Reference allele column\n            alternate_allele (Column): Alternate allele column\n\n        Returns:\n            A boolean column indicating if the alleles are palindromic.\n\n        Examples:\n            >>> d = [{\"reference\": 'A', \"alternate\": 'T'}, {\"reference\": 'AT', \"alternate\": 'AG'}, {\"reference\": 'AT', \"alternate\": 'AT'}, {\"reference\": 'CATATG', \"alternate\": 'CATATG'}, {\"reference\": '-', \"alternate\": None}]\n            >>> df = spark.createDataFrame(d)\n            >>> df.withColumn(\"is_palindromic\", 
StudyLocusGWASCatalog._are_alleles_palindromic(f.col(\"reference\"), f.col(\"alternate\"))).show()\n            +---------+---------+--------------+\n            |alternate|reference|is_palindromic|\n            +---------+---------+--------------+\n            |        T|        A|          true|\n            |       AG|       AT|         false|\n            |       AT|       AT|          true|\n            |   CATATG|   CATATG|          true|\n            |     null|        -|         false|\n            +---------+---------+--------------+\n            <BLANKLINE>\n\n        \"\"\"\n        revcomp = StudyLocusGWASCatalog._get_reverse_complement(alternate_allele)\n        return (\n            f.when(reference_allele == revcomp, True)\n            .when(revcomp.isNull(), False)\n            .otherwise(False)\n        )\n\n    @staticmethod\n    def _harmonise_beta(\n        risk_allele: Column,\n        reference_allele: Column,\n        alternate_allele: Column,\n        effect_size: Column,\n        confidence_interval: Column,\n    ) -> Column:\n\"\"\"A function to extract the beta value from the effect size and confidence interval.\n\n        If the confidence interval contains the word \"increase\" or \"decrease\" it indicates, we are dealing with betas.\n        If it's \"increase\" and the effect size needs to be harmonized, then multiply the effect size by -1\n\n        Args:\n            risk_allele (Column): Risk allele column\n            reference_allele (Column): Reference allele column\n            alternate_allele (Column): Alternate allele column\n            effect_size (Column): GWAS Catalog effect size column\n            confidence_interval (Column): GWAS Catalog confidence interval column\n\n        Returns:\n            A column containing the beta value.\n        \"\"\"\n        return (\n            f.when(\n                StudyLocusGWASCatalog._are_alleles_palindromic(\n                    reference_allele, alternate_allele\n                ),\n                None,\n            )\n            .when(\n                (\n                    StudyLocusGWASCatalog._effect_needs_harmonisation(\n                        risk_allele, reference_allele\n                    )\n                    & confidence_interval.contains(\"increase\")\n                )\n                | (\n                    ~StudyLocusGWASCatalog._effect_needs_harmonisation(\n                        risk_allele, reference_allele\n                    )\n                    & confidence_interval.contains(\"decrease\")\n                ),\n                -effect_size,\n            )\n            .otherwise(effect_size)\n            .cast(DoubleType())\n        )\n\n    @staticmethod\n    def _harmonise_beta_ci(\n        risk_allele: Column,\n        reference_allele: Column,\n        alternate_allele: Column,\n        effect_size: Column,\n        confidence_interval: Column,\n        p_value: Column,\n        direction: str,\n    ) -> Column:\n\"\"\"Calculating confidence intervals for beta values.\n\n        Args:\n            risk_allele (Column): Risk allele column\n            reference_allele (Column): Reference allele column\n            alternate_allele (Column): Alternate allele column\n            effect_size (Column): GWAS Catalog effect size column\n            confidence_interval (Column): GWAS Catalog confidence interval column\n            p_value (Column): GWAS Catalog p-value column\n            direction (str): This is the direction of the confidence interval. 
It can be either \"upper\" or \"lower\".\n\n        Returns:\n            The upper and lower bounds of the confidence interval for the beta coefficient.\n        \"\"\"\n        zscore_95 = f.lit(1.96)\n        beta = StudyLocusGWASCatalog._harmonise_beta(\n            risk_allele,\n            reference_allele,\n            alternate_allele,\n            effect_size,\n            confidence_interval,\n        )\n        zscore = pvalue_to_zscore(p_value)\n        return (\n            f.when(f.lit(direction) == \"upper\", beta + f.abs(zscore_95 * beta) / zscore)\n            .when(f.lit(direction) == \"lower\", beta - f.abs(zscore_95 * beta) / zscore)\n            .otherwise(None)\n        )\n\n    @staticmethod\n    def _harmonise_odds_ratio(\n        risk_allele: Column,\n        reference_allele: Column,\n        alternate_allele: Column,\n        effect_size: Column,\n        confidence_interval: Column,\n    ) -> Column:\n\"\"\"Harmonizing odds ratio.\n\n        Args:\n            risk_allele (Column): Risk allele column\n            reference_allele (Column): Reference allele column\n            alternate_allele (Column): Alternate allele column\n            effect_size (Column): GWAS Catalog effect size column\n            confidence_interval (Column): GWAS Catalog confidence interval column\n\n        Returns:\n            A column with the odds ratio, or 1/odds_ratio if harmonization required.\n        \"\"\"\n        return (\n            f.when(\n                StudyLocusGWASCatalog._are_alleles_palindromic(\n                    reference_allele, alternate_allele\n                ),\n                None,\n            )\n            .when(\n                (\n                    StudyLocusGWASCatalog._effect_needs_harmonisation(\n                        risk_allele, reference_allele\n                    )\n                    & ~confidence_interval.rlike(\"|\".join([\"decrease\", \"increase\"]))\n                ),\n                1 / effect_size,\n            )\n            .otherwise(effect_size)\n            .cast(DoubleType())\n        )\n\n    @staticmethod\n    def _harmonise_odds_ratio_ci(\n        risk_allele: Column,\n        reference_allele: Column,\n        alternate_allele: Column,\n        effect_size: Column,\n        confidence_interval: Column,\n        p_value: Column,\n        direction: str,\n    ) -> Column:\n\"\"\"Calculating confidence intervals for beta values.\n\n        Args:\n            risk_allele (Column): Risk allele column\n            reference_allele (Column): Reference allele column\n            alternate_allele (Column): Alternate allele column\n            effect_size (Column): GWAS Catalog effect size column\n            confidence_interval (Column): GWAS Catalog confidence interval column\n            p_value (Column): GWAS Catalog p-value column\n            direction (str): This is the direction of the confidence interval. 
It can be either \"upper\" or \"lower\".\n\n        Returns:\n            The upper and lower bounds of the 95% confidence interval for the odds ratio.\n        \"\"\"\n        zscore_95 = f.lit(1.96)\n        odds_ratio = StudyLocusGWASCatalog._harmonise_odds_ratio(\n            risk_allele,\n            reference_allele,\n            alternate_allele,\n            effect_size,\n            confidence_interval,\n        )\n        odds_ratio_estimate = f.log(odds_ratio)\n        zscore = pvalue_to_zscore(p_value)\n        odds_ratio_se = odds_ratio_estimate / zscore\n        return f.when(\n            f.lit(direction) == \"upper\",\n            f.exp(odds_ratio_estimate + f.abs(zscore_95 * odds_ratio_se)),\n        ).when(\n            f.lit(direction) == \"lower\",\n            f.exp(odds_ratio_estimate - f.abs(zscore_95 * odds_ratio_se)),\n        )\n\n    @staticmethod\n    def _concatenate_substudy_description(\n        association_trait: Column, pvalue_text: Column, mapped_trait_uri: Column\n    ) -> Column:\n\"\"\"Substudy description parsing. Complex string containing metadata about the substudy (e.g. QTL, specific EFO, etc.).\n\n        Args:\n            association_trait (Column): GWAS Catalog association trait column\n            pvalue_text (Column): GWAS Catalog p-value text column\n            mapped_trait_uri (Column): GWAS Catalog mapped trait URI column\n\n        Returns:\n            A column with the substudy description in the shape trait|pvaluetext1_pvaluetext2|EFO1_EFO2.\n\n        Examples:\n        >>> df = spark.createDataFrame([\n        ...    (\"Height\", \"http://www.ebi.ac.uk/efo/EFO_0000001,http://www.ebi.ac.uk/efo/EFO_0000002\", \"European Ancestry\"),\n        ...    (\"Schizophrenia\", \"http://www.ebi.ac.uk/efo/MONDO_0005090\", None)],\n        ...    [\"association_trait\", \"mapped_trait_uri\", \"pvalue_text\"]\n        ... 
)\n        >>> df.withColumn('substudy_description', StudyLocusGWASCatalog._concatenate_substudy_description(df.association_trait, df.pvalue_text, df.mapped_trait_uri)).show(truncate=False)\n        +-----------------+-------------------------------------------------------------------------+-----------------+------------------------------------------+\n        |association_trait|mapped_trait_uri                                                         |pvalue_text      |substudy_description                      |\n        +-----------------+-------------------------------------------------------------------------+-----------------+------------------------------------------+\n        |Height           |http://www.ebi.ac.uk/efo/EFO_0000001,http://www.ebi.ac.uk/efo/EFO_0000002|European Ancestry|Height|EA|EFO_0000001/EFO_0000002         |\n        |Schizophrenia    |http://www.ebi.ac.uk/efo/MONDO_0005090                                   |null             |Schizophrenia|no_pvalue_text|MONDO_0005090|\n        +-----------------+-------------------------------------------------------------------------+-----------------+------------------------------------------+\n        <BLANKLINE>\n        \"\"\"\n        p_value_text = f.coalesce(\n            StudyLocusGWASCatalog._normalise_pvaluetext(pvalue_text),\n            f.array(f.lit(\"no_pvalue_text\")),\n        )\n        return f.concat_ws(\n            \"|\",\n            association_trait,\n            f.concat_ws(\n                \"/\",\n                p_value_text,\n            ),\n            f.concat_ws(\n                \"/\",\n                parse_efos(mapped_trait_uri),\n            ),\n        )\n\n    @staticmethod\n    def _qc_all(\n        qc: Column,\n        chromosome: Column,\n        position: Column,\n        reference_allele: Column,\n        alternate_allele: Column,\n        strongest_snp_risk_allele: Column,\n        p_value_mantissa: Column,\n        p_value_exponent: Column,\n        p_value_cutoff: float,\n    ) -> Column:\n\"\"\"Flag associations that fail any QC.\n\n        Args:\n            qc (Column): QC column\n            chromosome (Column): Chromosome column\n            position (Column): Position column\n            reference_allele (Column): Reference allele column\n            alternate_allele (Column): Alternate allele column\n            strongest_snp_risk_allele (Column): Strongest SNP risk allele column\n            p_value_mantissa (Column): P-value mantissa column\n            p_value_exponent (Column): P-value exponent column\n            p_value_cutoff (float): P-value cutoff\n\n        Returns:\n            Column: Updated QC column with flag.\n        \"\"\"\n        qc = StudyLocusGWASCatalog._qc_variant_interactions(\n            qc, strongest_snp_risk_allele\n        )\n        qc = StudyLocusGWASCatalog._qc_subsignificant_associations(\n            qc, p_value_mantissa, p_value_exponent, p_value_cutoff\n        )\n        qc = StudyLocusGWASCatalog._qc_genomic_location(qc, chromosome, position)\n        qc = StudyLocusGWASCatalog._qc_variant_inconsistencies(\n            qc, chromosome, position, strongest_snp_risk_allele\n        )\n        qc = StudyLocusGWASCatalog._qc_unmapped_variants(qc, alternate_allele)\n        qc = StudyLocusGWASCatalog._qc_palindromic_alleles(\n            qc, reference_allele, alternate_allele\n        )\n        return qc\n\n    @staticmethod\n    def _qc_variant_interactions(\n        qc: Column, strongest_snp_risk_allele: Column\n    ) -> Column:\n\"\"\"Flag 
associations based on variant x variant interactions.\n\n        Args:\n            qc (Column): QC column\n            strongest_snp_risk_allele (Column): Column with the strongest SNP risk allele\n\n        Returns:\n            Column: Updated QC column with flag.\n        \"\"\"\n        return StudyLocusGWASCatalog._update_quality_flag(\n            qc,\n            strongest_snp_risk_allele.contains(\";\"),\n            StudyLocusQualityCheck.COMPOSITE_FLAG,\n        )\n\n    @staticmethod\n    def _qc_subsignificant_associations(\n        qc: Column,\n        p_value_mantissa: Column,\n        p_value_exponent: Column,\n        pvalue_cutoff: float,\n    ) -> Column:\n\"\"\"Flag associations below significant threshold.\n\n        Args:\n            qc (Column): QC column\n            p_value_mantissa (Column): P-value mantissa column\n            p_value_exponent (Column): P-value exponent column\n            pvalue_cutoff (float): association p-value cut-off\n\n        Returns:\n            Column: Updated QC column with flag.\n\n        Examples:\n            >>> import pyspark.sql.types as t\n            >>> d = [{'qc': None, 'p_value_mantissa': 1, 'p_value_exponent': -7}, {'qc': None, 'p_value_mantissa': 1, 'p_value_exponent': -8}, {'qc': None, 'p_value_mantissa': 5, 'p_value_exponent': -8}, {'qc': None, 'p_value_mantissa': 1, 'p_value_exponent': -9}]\n            >>> df = spark.createDataFrame(d, t.StructType([t.StructField('qc', t.ArrayType(t.StringType()), True), t.StructField('p_value_mantissa', t.IntegerType()), t.StructField('p_value_exponent', t.IntegerType())]))\n            >>> df.withColumn('qc', StudyLocusGWASCatalog._qc_subsignificant_associations(f.col(\"qc\"), f.col(\"p_value_mantissa\"), f.col(\"p_value_exponent\"), 5e-8)).show(truncate = False)\n            +------------------------+----------------+----------------+\n            |qc                      |p_value_mantissa|p_value_exponent|\n            +------------------------+----------------+----------------+\n            |[Subsignificant p-value]|1               |-7              |\n            |[]                      |1               |-8              |\n            |[]                      |5               |-8              |\n            |[]                      |1               |-9              |\n            +------------------------+----------------+----------------+\n            <BLANKLINE>\n\n        \"\"\"\n        return StudyLocus._update_quality_flag(\n            qc,\n            calculate_neglog_pvalue(p_value_mantissa, p_value_exponent)\n            < f.lit(-np.log10(pvalue_cutoff)),\n            StudyLocusQualityCheck.SUBSIGNIFICANT_FLAG,\n        )\n\n    @staticmethod\n    def _qc_genomic_location(\n        qc: Column, chromosome: Column, position: Column\n    ) -> Column:\n\"\"\"Flag associations without genomic location in GWAS Catalog.\n\n        Args:\n            qc (Column): QC column\n            chromosome (Column): Chromosome column in GWAS Catalog\n            position (Column): Position column in GWAS Catalog\n\n        Returns:\n            Column: Updated QC column with flag.\n\n        Examples:\n            >>> import pyspark.sql.types as t\n            >>> d = [{'qc': None, 'chromosome': None, 'position': None}, {'qc': None, 'chromosome': '1', 'position': None}, {'qc': None, 'chromosome': None, 'position': 1}, {'qc': None, 'chromosome': '1', 'position': 1}]\n            >>> df = spark.createDataFrame(d, schema=t.StructType([t.StructField('qc', t.ArrayType(t.StringType()), 
True), t.StructField('chromosome', t.StringType()), t.StructField('position', t.IntegerType())]))\n            >>> df.withColumn('qc', StudyLocusGWASCatalog._qc_genomic_location(df.qc, df.chromosome, df.position)).show(truncate=False)\n            +----------------------------+----------+--------+\n            |qc                          |chromosome|position|\n            +----------------------------+----------+--------+\n            |[Incomplete genomic mapping]|null      |null    |\n            |[Incomplete genomic mapping]|1         |null    |\n            |[Incomplete genomic mapping]|null      |1       |\n            |[]                          |1         |1       |\n            +----------------------------+----------+--------+\n            <BLANKLINE>\n\n        \"\"\"\n        return StudyLocus._update_quality_flag(\n            qc,\n            position.isNull() | chromosome.isNull(),\n            StudyLocusQualityCheck.NO_GENOMIC_LOCATION_FLAG,\n        )\n\n    @staticmethod\n    def _qc_variant_inconsistencies(\n        qc: Column,\n        chromosome: Column,\n        position: Column,\n        strongest_snp_risk_allele: Column,\n    ) -> Column:\n\"\"\"Flag associations with inconsistencies in the variant annotation.\n\n        Args:\n            qc (Column): QC column\n            chromosome (Column): Chromosome column in GWAS Catalog\n            position (Column): Position column in GWAS Catalog\n            strongest_snp_risk_allele (Column): Strongest SNP risk allele column in GWAS Catalog\n\n        Returns:\n            Column: Updated QC column with flag.\n        \"\"\"\n        return StudyLocusGWASCatalog._update_quality_flag(\n            qc,\n            # Number of chromosomes does not correspond to the number of positions:\n            (f.size(f.split(chromosome, \";\")) != f.size(f.split(position, \";\")))\n            # Number of chromosome values different from riskAllele values:\n            | (\n                f.size(f.split(chromosome, \";\"))\n                != f.size(f.split(strongest_snp_risk_allele, \";\"))\n            ),\n            StudyLocusQualityCheck.INCONSISTENCY_FLAG,\n        )\n\n    @staticmethod\n    def _qc_unmapped_variants(qc: Column, alternate_allele: Column) -> Column:\n\"\"\"Flag associations with variants not mapped to variantAnnotation.\n\n        Args:\n            qc (Column): QC column\n            alternate_allele (Column): alternate allele\n\n        Returns:\n            Column: Updated QC column with flag.\n\n        Example:\n            >>> import pyspark.sql.types as t\n            >>> d = [{'alternate_allele': 'A', 'qc': None}, {'alternate_allele': None, 'qc': None}]\n            >>> schema = t.StructType([t.StructField('alternate_allele', t.StringType(), True), t.StructField('qc', t.ArrayType(t.StringType()), True)])\n            >>> df = spark.createDataFrame(data=d, schema=schema)\n            >>> df.withColumn(\"new_qc\", StudyLocusGWASCatalog._qc_unmapped_variants(f.col(\"qc\"), f.col(\"alternate_allele\"))).show()\n            +----------------+----+--------------------+\n            |alternate_allele|  qc|              new_qc|\n            +----------------+----+--------------------+\n            |               A|null|                  []|\n            |            null|null|[No mapping in Gn...|\n            +----------------+----+--------------------+\n            <BLANKLINE>\n\n        \"\"\"\n        return StudyLocus._update_quality_flag(\n            qc,\n            alternate_allele.isNull(),\n      
      StudyLocusQualityCheck.NON_MAPPED_VARIANT_FLAG,\n        )\n\n    @staticmethod\n    def _qc_palindromic_alleles(\n        qc: Column, reference_allele: Column, alternate_allele: Column\n    ) -> Column:\n\"\"\"Flag associations with palindromic variants which effects can not be harmonised.\n\n        Args:\n            qc (Column): QC column\n            reference_allele (Column): reference allele\n            alternate_allele (Column): alternate allele\n\n        Returns:\n            Column: Updated QC column with flag.\n\n        Example:\n            >>> import pyspark.sql.types as t\n            >>> schema = t.StructType([t.StructField('reference_allele', t.StringType(), True), t.StructField('alternate_allele', t.StringType(), True), t.StructField('qc', t.ArrayType(t.StringType()), True)])\n            >>> d = [{'reference_allele': 'A', 'alternate_allele': 'T', 'qc': None}, {'reference_allele': 'AT', 'alternate_allele': 'TA', 'qc': None}, {'reference_allele': 'AT', 'alternate_allele': 'AT', 'qc': None}]\n            >>> df = spark.createDataFrame(data=d, schema=schema)\n            >>> df.withColumn(\"qc\", StudyLocusGWASCatalog._qc_palindromic_alleles(f.col(\"qc\"), f.col(\"reference_allele\"), f.col(\"alternate_allele\"))).show(truncate=False)\n            +----------------+----------------+---------------------------------------+\n            |reference_allele|alternate_allele|qc                                     |\n            +----------------+----------------+---------------------------------------+\n            |A               |T               |[Palindrome alleles - cannot harmonize]|\n            |AT              |TA              |[]                                     |\n            |AT              |AT              |[Palindrome alleles - cannot harmonize]|\n            +----------------+----------------+---------------------------------------+\n            <BLANKLINE>\n\n        \"\"\"\n        return StudyLocus._update_quality_flag(\n            qc,\n            StudyLocusGWASCatalog._are_alleles_palindromic(\n                reference_allele, alternate_allele\n            ),\n            StudyLocusQualityCheck.PALINDROMIC_ALLELE_FLAG,\n        )\n\n    @classmethod\n    def from_source(\n        cls: type[StudyLocusGWASCatalog],\n        gwas_associations: DataFrame,\n        variant_annotation: VariantAnnotation,\n        pvalue_threshold: float = 5e-8,\n    ) -> StudyLocusGWASCatalog:\n\"\"\"Read GWASCatalog associations.\n\n        It reads the GWAS Catalog association dataset, selects and renames columns, casts columns, and\n        applies some pre-defined filters on the data:\n\n        Args:\n            gwas_associations (DataFrame): GWAS Catalog raw associations dataset\n            variant_annotation (VariantAnnotation): Variant annotation dataset\n            pvalue_threshold (float): P-value threshold for flagging associations\n\n        Returns:\n            StudyLocusGWASCatalog: StudyLocusGWASCatalog dataset\n        \"\"\"\n        return cls(\n            _df=gwas_associations.withColumn(\n                \"studyLocusId\", f.monotonically_increasing_id().cast(LongType())\n            )\n            .transform(\n                # Map/harmonise variants to variant annotation dataset:\n                # This function adds columns: variantId, referenceAllele, alternateAllele, chromosome, position\n                lambda df: StudyLocusGWASCatalog._map_to_variant_annotation_variants(\n                    df, variant_annotation\n                )\n      
      )\n            .withColumn(\n                # Perform all quality control checks:\n                \"qualityControls\",\n                StudyLocusGWASCatalog._qc_all(\n                    f.array().alias(\"qualityControls\"),\n                    f.col(\"CHR_ID\"),\n                    f.col(\"CHR_POS\").cast(IntegerType()),\n                    f.col(\"referenceAllele\"),\n                    f.col(\"alternateAllele\"),\n                    f.col(\"STRONGEST SNP-RISK ALLELE\"),\n                    *StudyLocusGWASCatalog._parse_pvalue(f.col(\"P-VALUE\")),\n                    pvalue_threshold,\n                ),\n            )\n            .select(\n                # INSIDE STUDY-LOCUS SCHEMA:\n                \"studyLocusId\",\n                \"variantId\",\n                # Mapped genomic location of the variant (; separated list)\n                \"chromosome\",\n                \"position\",\n                f.col(\"STUDY ACCESSION\").alias(\"studyId\"),\n                # beta value of the association\n                StudyLocusGWASCatalog._harmonise_beta(\n                    StudyLocusGWASCatalog._normalise_risk_allele(\n                        f.col(\"STRONGEST SNP-RISK ALLELE\")\n                    ),\n                    f.col(\"referenceAllele\"),\n                    f.col(\"alternateAllele\"),\n                    f.col(\"OR or BETA\"),\n                    f.col(\"95% CI (TEXT)\"),\n                ).alias(\"beta\"),\n                # odds ratio of the association\n                StudyLocusGWASCatalog._harmonise_odds_ratio(\n                    StudyLocusGWASCatalog._normalise_risk_allele(\n                        f.col(\"STRONGEST SNP-RISK ALLELE\")\n                    ),\n                    f.col(\"referenceAllele\"),\n                    f.col(\"alternateAllele\"),\n                    f.col(\"OR or BETA\"),\n                    f.col(\"95% CI (TEXT)\"),\n                ).alias(\"oddsRatio\"),\n                # CI lower of the beta value\n                StudyLocusGWASCatalog._harmonise_beta_ci(\n                    StudyLocusGWASCatalog._normalise_risk_allele(\n                        f.col(\"STRONGEST SNP-RISK ALLELE\")\n                    ),\n                    f.col(\"referenceAllele\"),\n                    f.col(\"alternateAllele\"),\n                    f.col(\"OR or BETA\"),\n                    f.col(\"95% CI (TEXT)\"),\n                    f.col(\"P-VALUE\"),\n                    \"lower\",\n                ).alias(\"betaConfidenceIntervalLower\"),\n                # CI upper for the beta value\n                StudyLocusGWASCatalog._harmonise_beta_ci(\n                    StudyLocusGWASCatalog._normalise_risk_allele(\n                        f.col(\"STRONGEST SNP-RISK ALLELE\")\n                    ),\n                    f.col(\"referenceAllele\"),\n                    f.col(\"alternateAllele\"),\n                    f.col(\"OR or BETA\"),\n                    f.col(\"95% CI (TEXT)\"),\n                    f.col(\"P-VALUE\"),\n                    \"upper\",\n                ).alias(\"betaConfidenceIntervalUpper\"),\n                # CI lower of the odds ratio value\n                StudyLocusGWASCatalog._harmonise_odds_ratio_ci(\n                    StudyLocusGWASCatalog._normalise_risk_allele(\n                        f.col(\"STRONGEST SNP-RISK ALLELE\")\n                    ),\n                    f.col(\"referenceAllele\"),\n                    f.col(\"alternateAllele\"),\n                    f.col(\"OR or BETA\"),\n                    
f.col(\"95% CI (TEXT)\"),\n                    f.col(\"P-VALUE\"),\n                    \"lower\",\n                ).alias(\"oddsRatioConfidenceIntervalLower\"),\n                # CI upper of the odds ratio value\n                StudyLocusGWASCatalog._harmonise_odds_ratio_ci(\n                    StudyLocusGWASCatalog._normalise_risk_allele(\n                        f.col(\"STRONGEST SNP-RISK ALLELE\")\n                    ),\n                    f.col(\"referenceAllele\"),\n                    f.col(\"alternateAllele\"),\n                    f.col(\"OR or BETA\"),\n                    f.col(\"95% CI (TEXT)\"),\n                    f.col(\"P-VALUE\"),\n                    \"upper\",\n                ).alias(\"oddsRatioConfidenceIntervalUpper\"),\n                # p-value of the association, string: split into exponent and mantissa.\n                *StudyLocusGWASCatalog._parse_pvalue(f.col(\"P-VALUE\")),\n                # Capturing phenotype granularity at the association level\n                StudyLocusGWASCatalog._concatenate_substudy_description(\n                    f.col(\"DISEASE/TRAIT\"),\n                    f.col(\"P-VALUE (TEXT)\"),\n                    f.col(\"MAPPED_TRAIT_URI\"),\n                ).alias(\"subStudyDescription\"),\n                # Quality controls (array of strings)\n                \"qualityControls\",\n            )\n        )\n\n    def update_study_id(\n        self: StudyLocusGWASCatalog, study_annotation: DataFrame\n    ) -> StudyLocusGWASCatalog:\n\"\"\"Update studyId with a dataframe containing study.\n\n        Args:\n            study_annotation (DataFrame): Dataframe containing `updatedStudyId` and key columns `studyId` and `subStudyDescription`.\n\n        Returns:\n            StudyLocusGWASCatalog: Updated study locus.\n        \"\"\"\n        self.df = (\n            self._df.join(\n                study_annotation, on=[\"studyId\", \"subStudyDescription\"], how=\"left\"\n            )\n            .withColumn(\"studyId\", f.coalesce(\"updatedStudyId\", \"studyId\"))\n            .drop(\"subStudyDescription\", \"updatedStudyId\")\n        )\n        return self\n\n    def annotate_ld(\n        self: StudyLocusGWASCatalog,\n        session: Session,\n        studies: StudyIndexGWASCatalog,\n        ld_populations: list[str],\n        ld_index_template: str,\n        ld_matrix_template: str,\n        min_r2: float,\n    ) -> StudyLocus:\n\"\"\"Annotate LD set for every studyLocus using gnomAD.\n\n        Args:\n            session (Session): Session\n            studies (StudyIndexGWASCatalog): Study index containing ancestry information\n            ld_populations (list[str]): List of populations to annotate\n            ld_index_template (str): Template path of the LD matrix index containing `{POP}` where the population is expected\n            ld_matrix_template (str): Template path of the LD matrix containing `{POP}` where the population is expected\n            min_r2 (float): Minimum r2 to include in the LD set\n\n        Returns:\n            StudyLocus: Study-locus with an annotated credible set.\n        \"\"\"\n        # TODO: call unique_study_locus_ancestries here so that it is not duplicated with ld_annotation_by_locus_ancestry\n        # LD annotation for all unique lead variants in all populations (study independent).\n        ld_r = LDAnnotatorGnomad.ld_annotation_by_locus_ancestry(\n            session,\n            self,\n            studies,\n            ld_populations,\n            ld_index_template,\n            
ld_matrix_template,\n            min_r2,\n        ).coalesce(400)\n\n        ld_set = (\n            self.unique_study_locus_ancestries(studies)\n            .join(ld_r, on=[\"chromosome\", \"variantId\", \"gnomadPopulation\"], how=\"left\")\n            .withColumn(\"r2\", f.pow(f.col(\"r\"), f.lit(2)))\n            .withColumn(\n                \"r2Overall\",\n                LDAnnotatorGnomad.weighted_r_overall(\n                    f.col(\"chromosome\"),\n                    f.col(\"studyId\"),\n                    f.col(\"variantId\"),\n                    f.col(\"tagVariantId\"),\n                    f.col(\"relativeSampleSize\"),\n                    f.col(\"r2\"),\n                ),\n            )\n            .groupBy(\"chromosome\", \"studyId\", \"variantId\")\n            .agg(\n                f.collect_set(\n                    f.when(\n                        f.col(\"tagVariantId\").isNotNull(),\n                        f.struct(\"tagVariantId\", \"r2Overall\"),\n                    )\n                ).alias(\"credibleSet\")\n            )\n        )\n\n        self.df = self.df.join(\n            ld_set, on=[\"chromosome\", \"studyId\", \"variantId\"], how=\"left\"\n        )\n\n        return self._qc_unresolved_ld()\n\n    def _qc_ambiguous_study(self: StudyLocusGWASCatalog) -> StudyLocusGWASCatalog:\n\"\"\"Flag associations with variants that can not be unambiguously associated with one study.\n\n        Returns:\n            StudyLocusGWASCatalog: Updated study locus.\n        \"\"\"\n        assoc_ambiguity_window = Window.partitionBy(\n            f.col(\"studyId\"), f.col(\"variantId\")\n        )\n\n        self._df.withColumn(\n            \"qualityControls\",\n            StudyLocus._update_quality_flag(\n                f.col(\"qualityControls\"),\n                f.count(f.col(\"variantId\")).over(assoc_ambiguity_window) > 1,\n                StudyLocusQualityCheck.AMBIGUOUS_STUDY,\n            ),\n        )\n        return self\n\n    def _qc_unresolved_ld(self: StudyLocusGWASCatalog) -> StudyLocusGWASCatalog:\n\"\"\"Flag associations with variants that are not found in the LD reference.\n\n        Returns:\n            StudyLocusGWASCatalog: Updated study locus.\n        \"\"\"\n        self._df.withColumn(\n            \"qualityControls\",\n            StudyLocus._update_quality_flag(\n                f.col(\"qualityControls\"),\n                f.col(\"credibleSet\").isNull(),\n                StudyLocusQualityCheck.UNRESOLVED_LD,\n            ),\n        )\n        return self\n
"},{"location":"components/dataset/study_locus/study_locus_gwas_catalog/#otg.dataset.study_locus.StudyLocusGWASCatalog.annotate_ld","title":"annotate_ld(session, studies, ld_populations, ld_index_template, ld_matrix_template, min_r2)","text":"

Annotate LD set for every studyLocus using gnomAD.

Parameters:

    session (Session): Session (required)
    studies (StudyIndexGWASCatalog): Study index containing ancestry information (required)
    ld_populations (list[str]): List of populations to annotate (required)
    ld_index_template (str): Template path of the LD matrix index containing {POP} where the population is expected (required)
    ld_matrix_template (str): Template path of the LD matrix containing {POP} where the population is expected (required)
    min_r2 (float): Minimum r2 to include in the LD set (required)

Returns:

    StudyLocus: Study-locus with an annotated credible set.

Source code in src/otg/dataset/study_locus.py
def annotate_ld(\n    self: StudyLocusGWASCatalog,\n    session: Session,\n    studies: StudyIndexGWASCatalog,\n    ld_populations: list[str],\n    ld_index_template: str,\n    ld_matrix_template: str,\n    min_r2: float,\n) -> StudyLocus:\n\"\"\"Annotate LD set for every studyLocus using gnomAD.\n\n    Args:\n        session (Session): Session\n        studies (StudyIndexGWASCatalog): Study index containing ancestry information\n        ld_populations (list[str]): List of populations to annotate\n        ld_index_template (str): Template path of the LD matrix index containing `{POP}` where the population is expected\n        ld_matrix_template (str): Template path of the LD matrix containing `{POP}` where the population is expected\n        min_r2 (float): Minimum r2 to include in the LD set\n\n    Returns:\n        StudyLocus: Study-locus with an annotated credible set.\n    \"\"\"\n    # TODO: call unique_study_locus_ancestries here so that it is not duplicated with ld_annotation_by_locus_ancestry\n    # LD annotation for all unique lead variants in all populations (study independent).\n    ld_r = LDAnnotatorGnomad.ld_annotation_by_locus_ancestry(\n        session,\n        self,\n        studies,\n        ld_populations,\n        ld_index_template,\n        ld_matrix_template,\n        min_r2,\n    ).coalesce(400)\n\n    ld_set = (\n        self.unique_study_locus_ancestries(studies)\n        .join(ld_r, on=[\"chromosome\", \"variantId\", \"gnomadPopulation\"], how=\"left\")\n        .withColumn(\"r2\", f.pow(f.col(\"r\"), f.lit(2)))\n        .withColumn(\n            \"r2Overall\",\n            LDAnnotatorGnomad.weighted_r_overall(\n                f.col(\"chromosome\"),\n                f.col(\"studyId\"),\n                f.col(\"variantId\"),\n                f.col(\"tagVariantId\"),\n                f.col(\"relativeSampleSize\"),\n                f.col(\"r2\"),\n            ),\n        )\n        .groupBy(\"chromosome\", \"studyId\", \"variantId\")\n        .agg(\n            f.collect_set(\n                f.when(\n                    f.col(\"tagVariantId\").isNotNull(),\n                    f.struct(\"tagVariantId\", \"r2Overall\"),\n                )\n            ).alias(\"credibleSet\")\n        )\n    )\n\n    self.df = self.df.join(\n        ld_set, on=[\"chromosome\", \"studyId\", \"variantId\"], how=\"left\"\n    )\n\n    return self._qc_unresolved_ld()\n
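A minimal call sketch, assuming a StudyLocusGWASCatalog instance (`study_locus`), a Session (`session`), and a StudyIndexGWASCatalog (`studies`) are already in hand; the bucket, template paths, and population codes below are placeholders, not real resource locations.

```python
# Placeholders only: swap in real LD index/matrix locations and populations.
annotated = study_locus.annotate_ld(
    session=session,
    studies=studies,
    ld_populations=["nfe", "afr"],
    ld_index_template="gs://example-bucket/ld_index_{POP}.parquet",
    ld_matrix_template="gs://example-bucket/ld_matrix_{POP}.bm",
    min_r2=0.5,
)
```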
"},{"location":"components/dataset/study_locus/study_locus_gwas_catalog/#otg.dataset.study_locus.StudyLocusGWASCatalog.from_source","title":"from_source(gwas_associations, variant_annotation, pvalue_threshold=5e-08) classmethod","text":"

Read GWASCatalog associations.

It reads the GWAS Catalog association dataset, selects and renames columns, casts columns, and applies some pre-defined filters on the data.

Parameters:

    gwas_associations (DataFrame): GWAS Catalog raw associations dataset (required)
    variant_annotation (VariantAnnotation): Variant annotation dataset (required)
    pvalue_threshold (float): P-value threshold for flagging associations (default: 5e-08)

Returns:

    StudyLocusGWASCatalog: StudyLocusGWASCatalog dataset

Source code in src/otg/dataset/study_locus.py
@classmethod\ndef from_source(\n    cls: type[StudyLocusGWASCatalog],\n    gwas_associations: DataFrame,\n    variant_annotation: VariantAnnotation,\n    pvalue_threshold: float = 5e-8,\n) -> StudyLocusGWASCatalog:\n\"\"\"Read GWASCatalog associations.\n\n    It reads the GWAS Catalog association dataset, selects and renames columns, casts columns, and\n    applies some pre-defined filters on the data:\n\n    Args:\n        gwas_associations (DataFrame): GWAS Catalog raw associations dataset\n        variant_annotation (VariantAnnotation): Variant annotation dataset\n        pvalue_threshold (float): P-value threshold for flagging associations\n\n    Returns:\n        StudyLocusGWASCatalog: StudyLocusGWASCatalog dataset\n    \"\"\"\n    return cls(\n        _df=gwas_associations.withColumn(\n            \"studyLocusId\", f.monotonically_increasing_id().cast(LongType())\n        )\n        .transform(\n            # Map/harmonise variants to variant annotation dataset:\n            # This function adds columns: variantId, referenceAllele, alternateAllele, chromosome, position\n            lambda df: StudyLocusGWASCatalog._map_to_variant_annotation_variants(\n                df, variant_annotation\n            )\n        )\n        .withColumn(\n            # Perform all quality control checks:\n            \"qualityControls\",\n            StudyLocusGWASCatalog._qc_all(\n                f.array().alias(\"qualityControls\"),\n                f.col(\"CHR_ID\"),\n                f.col(\"CHR_POS\").cast(IntegerType()),\n                f.col(\"referenceAllele\"),\n                f.col(\"alternateAllele\"),\n                f.col(\"STRONGEST SNP-RISK ALLELE\"),\n                *StudyLocusGWASCatalog._parse_pvalue(f.col(\"P-VALUE\")),\n                pvalue_threshold,\n            ),\n        )\n        .select(\n            # INSIDE STUDY-LOCUS SCHEMA:\n            \"studyLocusId\",\n            \"variantId\",\n            # Mapped genomic location of the variant (; separated list)\n            \"chromosome\",\n            \"position\",\n            f.col(\"STUDY ACCESSION\").alias(\"studyId\"),\n            # beta value of the association\n            StudyLocusGWASCatalog._harmonise_beta(\n                StudyLocusGWASCatalog._normalise_risk_allele(\n                    f.col(\"STRONGEST SNP-RISK ALLELE\")\n                ),\n                f.col(\"referenceAllele\"),\n                f.col(\"alternateAllele\"),\n                f.col(\"OR or BETA\"),\n                f.col(\"95% CI (TEXT)\"),\n            ).alias(\"beta\"),\n            # odds ratio of the association\n            StudyLocusGWASCatalog._harmonise_odds_ratio(\n                StudyLocusGWASCatalog._normalise_risk_allele(\n                    f.col(\"STRONGEST SNP-RISK ALLELE\")\n                ),\n                f.col(\"referenceAllele\"),\n                f.col(\"alternateAllele\"),\n                f.col(\"OR or BETA\"),\n                f.col(\"95% CI (TEXT)\"),\n            ).alias(\"oddsRatio\"),\n            # CI lower of the beta value\n            StudyLocusGWASCatalog._harmonise_beta_ci(\n                StudyLocusGWASCatalog._normalise_risk_allele(\n                    f.col(\"STRONGEST SNP-RISK ALLELE\")\n                ),\n                f.col(\"referenceAllele\"),\n                f.col(\"alternateAllele\"),\n                f.col(\"OR or BETA\"),\n                f.col(\"95% CI (TEXT)\"),\n                f.col(\"P-VALUE\"),\n                \"lower\",\n            
).alias(\"betaConfidenceIntervalLower\"),\n            # CI upper for the beta value\n            StudyLocusGWASCatalog._harmonise_beta_ci(\n                StudyLocusGWASCatalog._normalise_risk_allele(\n                    f.col(\"STRONGEST SNP-RISK ALLELE\")\n                ),\n                f.col(\"referenceAllele\"),\n                f.col(\"alternateAllele\"),\n                f.col(\"OR or BETA\"),\n                f.col(\"95% CI (TEXT)\"),\n                f.col(\"P-VALUE\"),\n                \"upper\",\n            ).alias(\"betaConfidenceIntervalUpper\"),\n            # CI lower of the odds ratio value\n            StudyLocusGWASCatalog._harmonise_odds_ratio_ci(\n                StudyLocusGWASCatalog._normalise_risk_allele(\n                    f.col(\"STRONGEST SNP-RISK ALLELE\")\n                ),\n                f.col(\"referenceAllele\"),\n                f.col(\"alternateAllele\"),\n                f.col(\"OR or BETA\"),\n                f.col(\"95% CI (TEXT)\"),\n                f.col(\"P-VALUE\"),\n                \"lower\",\n            ).alias(\"oddsRatioConfidenceIntervalLower\"),\n            # CI upper of the odds ratio value\n            StudyLocusGWASCatalog._harmonise_odds_ratio_ci(\n                StudyLocusGWASCatalog._normalise_risk_allele(\n                    f.col(\"STRONGEST SNP-RISK ALLELE\")\n                ),\n                f.col(\"referenceAllele\"),\n                f.col(\"alternateAllele\"),\n                f.col(\"OR or BETA\"),\n                f.col(\"95% CI (TEXT)\"),\n                f.col(\"P-VALUE\"),\n                \"upper\",\n            ).alias(\"oddsRatioConfidenceIntervalUpper\"),\n            # p-value of the association, string: split into exponent and mantissa.\n            *StudyLocusGWASCatalog._parse_pvalue(f.col(\"P-VALUE\")),\n            # Capturing phenotype granularity at the association level\n            StudyLocusGWASCatalog._concatenate_substudy_description(\n                f.col(\"DISEASE/TRAIT\"),\n                f.col(\"P-VALUE (TEXT)\"),\n                f.col(\"MAPPED_TRAIT_URI\"),\n            ).alias(\"subStudyDescription\"),\n            # Quality controls (array of strings)\n            \"qualityControls\",\n        )\n    )\n
"},{"location":"components/dataset/study_locus/study_locus_gwas_catalog/#otg.dataset.study_locus.StudyLocusGWASCatalog.update_study_id","title":"update_study_id(study_annotation)","text":"

Update studyId with a dataframe containing study annotation.

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| study_annotation | DataFrame | Dataframe containing updatedStudyId and key columns studyId and subStudyDescription. | required |

Returns:

| Name | Type | Description |
| --- | --- | --- |
| StudyLocusGWASCatalog | StudyLocusGWASCatalog | Updated study locus. |
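
Below is a minimal, hypothetical sketch of the expected annotation dataframe and the call; the column names come from the docstring, while the values and the `study_locus` variable are made up for illustration:

```python
# Hypothetical study annotation: updatedStudyId replaces studyId after the join
# on studyId and subStudyDescription (values are illustrative only).
study_annotation = spark.createDataFrame(
    [("GCST000001", "Trait X (example p-value text)", "GCST000001_1")],
    ["studyId", "subStudyDescription", "updatedStudyId"],
)
study_locus = study_locus.update_study_id(study_annotation)
```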

Source code in src/otg/dataset/study_locus.py
def update_study_id(\n    self: StudyLocusGWASCatalog, study_annotation: DataFrame\n) -> StudyLocusGWASCatalog:\n\"\"\"Update studyId with a dataframe containing study.\n\n    Args:\n        study_annotation (DataFrame): Dataframe containing `updatedStudyId` and key columns `studyId` and `subStudyDescription`.\n\n    Returns:\n        StudyLocusGWASCatalog: Updated study locus.\n    \"\"\"\n    self.df = (\n        self._df.join(\n            study_annotation, on=[\"studyId\", \"subStudyDescription\"], how=\"left\"\n        )\n        .withColumn(\"studyId\", f.coalesce(\"updatedStudyId\", \"studyId\"))\n        .drop(\"subStudyDescription\", \"updatedStudyId\")\n    )\n    return self\n
"},{"location":"components/method/_method/","title":"Method","text":"

Methods used across the Open Targets Genetics Pipeline

"},{"location":"components/method/clumping/","title":"Clumping","text":"

Clumping is a commonly used post-processing method that allows for the identification of independent association signals from GWAS summary statistics and curated associations. This process is critical because of the complex linkage disequilibrium (LD) structure in human populations, which can result in multiple statistically significant associations within the same genomic region. Clumping methods help reduce redundancy in GWAS results and ensure that each reported association represents an independent signal.

We have implemented two clumping methods:

"},{"location":"components/method/clumping/#clumping-based-on-linkage-disequilibrium-ld","title":"Clumping based on Linkage Disequilibrium (LD)","text":"

LD clumping reports the most significant genetic associations in a region in terms of a smaller number of \u201cclumps\u201d of genetically linked SNPs.

Source code in src/otg/method/clump.py
class LDclumping:\n\"\"\"LD clumping reports the most significant genetic associations in a region in terms of a smaller number of \u201cclumps\u201d of genetically linked SNPs.\"\"\"\n\n    @staticmethod\n    def _is_lead_linked(\n        study_id: Column,\n        variant_id: Column,\n        p_value_exponent: Column,\n        p_value_mantissa: Column,\n        credible_set: Column,\n    ) -> Column:\n\"\"\"Evaluates whether a lead variant is linked to a tag (with lowest p-value) in the same studyLocus dataset.\n\n        Args:\n            study_id (Column): studyId\n            variant_id (Column): Lead variant id\n            p_value_exponent (Column): p-value exponent\n            p_value_mantissa (Column): p-value mantissa\n            credible_set (Column): Credible set <array of structs>\n\n        Returns:\n            Column: Boolean in which True indicates that the lead is linked to another tag in the same dataset.\n        \"\"\"\n        leads_in_study = f.collect_set(variant_id).over(Window.partitionBy(study_id))\n        tags_in_studylocus = f.array_union(\n            # Get all tag variants from the credible set per studyLocusId\n            f.transform(credible_set, lambda x: x.tagVariantId),\n            # And append the lead variant so that the intersection is the same for all studyLocusIds in a study\n            f.array(f.col(\"variantId\")),\n        )\n        intersect_lead_tags = f.array_sort(\n            f.array_intersect(leads_in_study, tags_in_studylocus)\n        )\n        return (\n            # If the lead is in the credible set, we rank the peaks by p-value\n            f.when(\n                f.size(intersect_lead_tags) > 0,\n                f.row_number().over(\n                    Window.partitionBy(study_id, intersect_lead_tags).orderBy(\n                        p_value_exponent, p_value_mantissa\n                    )\n                )\n                > 1,\n            )\n            # If the intersection is empty (lead is not in the credible set or cred set is empty), the association is not linked\n            .otherwise(f.lit(False))\n        )\n\n    @classmethod\n    def clump(cls: type[LDclumping], associations: StudyLocus) -> StudyLocus:\n\"\"\"Perform clumping on studyLocus dataset.\n\n        Args:\n            associations (StudyLocus): StudyLocus dataset\n\n        Returns:\n            StudyLocus: including flag and removing credibleSet information for LD clumped loci.\n        \"\"\"\n        return associations.clump()\n
"},{"location":"components/method/clumping/#otg.method.clump.LDclumping.clump","title":"clump(associations) classmethod","text":"

Perform clumping on studyLocus dataset.

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| associations | StudyLocus | StudyLocus dataset | required |

Returns:

| Name | Type | Description |
| --- | --- | --- |
| StudyLocus | StudyLocus | including flag and removing credibleSet information for LD clumped loci. |
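
As a minimal usage sketch (assuming the package is importable as `otg` and `study_locus` is an existing StudyLocus dataset):

```python
from otg.method.clump import LDclumping

# Flags lead variants that are linked to a stronger lead in the same study and
# removes their credible set information.
clumped = LDclumping.clump(study_locus)
```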

Source code in src/otg/method/clump.py
@classmethod\ndef clump(cls: type[LDclumping], associations: StudyLocus) -> StudyLocus:\n\"\"\"Perform clumping on studyLocus dataset.\n\n    Args:\n        associations (StudyLocus): StudyLocus dataset\n\n    Returns:\n        StudyLocus: including flag and removing credibleSet information for LD clumped loci.\n    \"\"\"\n    return associations.clump()\n
"},{"location":"components/method/coloc/","title":"coloc","text":"

Calculate bayesian colocalisation based on overlapping signals from credible sets.

Based on the R COLOC package, which uses the Bayes factors from the credible set to estimate the posterior probability of colocalisation. This method makes the simplifying assumption that only one single causal variant exists for any given trait in any genomic region.

| Hypothesis | Description |
| --- | --- |
| H0 | no association with either trait in the region |
| H1 | association with trait 1 only |
| H2 | association with trait 2 only |
| H3 | both traits are associated, but have different single causal variants |
| H4 | both traits are associated and share the same single causal variant |

Approximate Bayes factors required

Coloc requires the availability of approximate Bayes factors (ABF) for each variant in the credible set (logABF column).
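
The core of the calculation is a numerically stable log-sum-exp over the per-hypothesis log Bayes factors, which are then normalised into posteriors. A minimal NumPy sketch of that step, using made-up log Bayes factors, is shown below:

```python
import numpy as np

def logsum(log_abf: np.ndarray) -> float:
    """Log of the sum of exponentiated values, factoring out the max for stability."""
    themax = np.max(log_abf)
    return float(themax + np.log(np.sum(np.exp(log_abf - themax))))

# Hypothetical per-hypothesis log Bayes factors (lH0abf ... lH4abf) for one locus pair:
l_h = np.array([0.0, 1.2, 0.8, 1.5, 3.0])

# Posteriors are the normalised exponentials (coloc_h0 ... coloc_h4); they sum to 1.
posteriors = np.exp(l_h - logsum(l_h))
print(posteriors.round(3))
```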

Source code in src/otg/method/colocalisation.py
class Coloc:\n\"\"\"Calculate bayesian colocalisation based on overlapping signals from credible sets.\n\n    Based on the [R COLOC package](https://github.com/chr1swallace/coloc/blob/main/R/claudia.R), which uses the Bayes factors from the credible set to estimate the posterior probability of colocalisation. This method makes the simplifying assumption that **only one single causal variant** exists for any given trait in any genomic region.\n\n    | Hypothesis    | Description                                                           |\n    | ------------- | --------------------------------------------------------------------- |\n    | H<sub>0</sub> | no association with either trait in the region                        |\n    | H<sub>1</sub> | association with trait 1 only                                         |\n    | H<sub>2</sub> | association with trait 2 only                                         |\n    | H<sub>3</sub> | both traits are associated, but have different single causal variants |\n    | H<sub>4</sub> | both traits are associated and share the same single causal variant   |\n\n    !!! warning \"Approximate Bayes factors required\"\n        Coloc requires the availability of approximate Bayes factors (ABF) for each variant in the credible set (`logABF` column).\n\n    \"\"\"\n\n    @staticmethod\n    def _get_logsum(log_abf: ndarray) -> float:\n\"\"\"Calculates logsum of vector.\n\n        This function calculates the log of the sum of the exponentiated\n        logs taking out the max, i.e. insuring that the sum is not Inf\n\n        Args:\n            log_abf (ndarray): log approximate bayes factor\n\n        Returns:\n            float: logsum\n\n        Example:\n            >>> l = [0.2, 0.1, 0.05, 0]\n            >>> round(Coloc._get_logsum(l), 6)\n            1.476557\n        \"\"\"\n        themax = np.max(log_abf)\n        result = themax + np.log(np.sum(np.exp(log_abf - themax)))\n        return float(result)\n\n    @staticmethod\n    def _get_posteriors(all_abfs: ndarray) -> DenseVector:\n\"\"\"Calculate posterior probabilities for each hypothesis.\n\n        Args:\n            all_abfs (ndarray): h0-h4 bayes factors\n\n        Returns:\n            DenseVector: Posterior\n\n        Example:\n            >>> l = np.array([0.2, 0.1, 0.05, 0])\n            >>> Coloc._get_posteriors(l)\n            DenseVector([0.279, 0.2524, 0.2401, 0.2284])\n        \"\"\"\n        diff = all_abfs - Coloc._get_logsum(all_abfs)\n        abfs_posteriors = np.exp(diff)\n        return Vectors.dense(abfs_posteriors)\n\n    @classmethod\n    def colocalise(\n        cls: type[Coloc],\n        overlapping_signals: StudyLocusOverlap,\n        priorc1: float = 1e-4,\n        priorc2: float = 1e-4,\n        priorc12: float = 1e-5,\n    ) -> Colocalisation:\n\"\"\"Calculate bayesian colocalisation based on overlapping signals.\n\n        Args:\n            overlapping_signals (StudyLocusOverlap): overlapping peaks\n            priorc1 (float): Prior on variant being causal for trait 1. Defaults to 1e-4.\n            priorc2 (float): Prior on variant being causal for trait 2. Defaults to 1e-4.\n            priorc12 (float): Prior on variant being causal for traits 1 and 2. 
Defaults to 1e-5.\n\n        Returns:\n            Colocalisation: Colocalisation results\n        \"\"\"\n        # register udfs\n        logsum = f.udf(Coloc._get_logsum, DoubleType())\n        posteriors = f.udf(Coloc._get_posteriors, VectorUDT())\n        return Colocalisation(\n            _df=(\n                overlapping_signals.df\n                # Before summing log_abf columns nulls need to be filled with 0:\n                .fillna(0, subset=[\"left_logABF\", \"right_logABF\"])\n                # Sum of log_abfs for each pair of signals\n                .withColumn(\"sum_log_abf\", f.col(\"left_logABF\") + f.col(\"right_logABF\"))\n                # Group by overlapping peak and generating dense vectors of log_abf:\n                .groupBy(\"chromosome\", \"left_studyLocusId\", \"right_studyLocusId\")\n                .agg(\n                    f.count(\"*\").alias(\"coloc_n_vars\"),\n                    fml.array_to_vector(f.collect_list(f.col(\"left_logABF\"))).alias(\n                        \"left_logABF\"\n                    ),\n                    fml.array_to_vector(f.collect_list(f.col(\"right_logABF\"))).alias(\n                        \"right_logABF\"\n                    ),\n                    fml.array_to_vector(f.collect_list(f.col(\"sum_log_abf\"))).alias(\n                        \"sum_log_abf\"\n                    ),\n                )\n                .withColumn(\"logsum1\", logsum(f.col(\"left_logABF\")))\n                .withColumn(\"logsum2\", logsum(f.col(\"right_logABF\")))\n                .withColumn(\"logsum12\", logsum(f.col(\"sum_log_abf\")))\n                .drop(\"left_logABF\", \"right_logABF\", \"sum_log_abf\")\n                # Add priors\n                # priorc1 Prior on variant being causal for trait 1\n                .withColumn(\"priorc1\", f.lit(priorc1))\n                # priorc2 Prior on variant being causal for trait 2\n                .withColumn(\"priorc2\", f.lit(priorc2))\n                # priorc12 Prior on variant being causal for traits 1 and 2\n                .withColumn(\"priorc12\", f.lit(priorc12))\n                # h0-h2\n                .withColumn(\"lH0abf\", f.lit(0))\n                .withColumn(\"lH1abf\", f.log(f.col(\"priorc1\")) + f.col(\"logsum1\"))\n                .withColumn(\"lH2abf\", f.log(f.col(\"priorc2\")) + f.col(\"logsum2\"))\n                # h3\n                .withColumn(\"sumlogsum\", f.col(\"logsum1\") + f.col(\"logsum2\"))\n                # exclude null H3/H4s: due to sumlogsum == logsum12\n                .filter(f.col(\"sumlogsum\") != f.col(\"logsum12\"))\n                .withColumn(\"max\", f.greatest(\"sumlogsum\", \"logsum12\"))\n                .withColumn(\n                    \"logdiff\",\n                    (\n                        f.col(\"max\")\n                        + f.log(\n                            f.exp(f.col(\"sumlogsum\") - f.col(\"max\"))\n                            - f.exp(f.col(\"logsum12\") - f.col(\"max\"))\n                        )\n                    ),\n                )\n                .withColumn(\n                    \"lH3abf\",\n                    f.log(f.col(\"priorc1\"))\n                    + f.log(f.col(\"priorc2\"))\n                    + f.col(\"logdiff\"),\n                )\n                .drop(\"right_logsum\", \"left_logsum\", \"sumlogsum\", \"max\", \"logdiff\")\n                # h4\n                .withColumn(\"lH4abf\", f.log(f.col(\"priorc12\")) + f.col(\"logsum12\"))\n                # cleaning\n                .drop(\n  
                  \"priorc1\", \"priorc2\", \"priorc12\", \"logsum1\", \"logsum2\", \"logsum12\"\n                )\n                # posteriors\n                .withColumn(\n                    \"allABF\",\n                    fml.array_to_vector(\n                        f.array(\n                            f.col(\"lH0abf\"),\n                            f.col(\"lH1abf\"),\n                            f.col(\"lH2abf\"),\n                            f.col(\"lH3abf\"),\n                            f.col(\"lH4abf\"),\n                        )\n                    ),\n                )\n                .withColumn(\n                    \"posteriors\", fml.vector_to_array(posteriors(f.col(\"allABF\")))\n                )\n                .withColumn(\"coloc_h0\", f.col(\"posteriors\").getItem(0))\n                .withColumn(\"coloc_h1\", f.col(\"posteriors\").getItem(1))\n                .withColumn(\"coloc_h2\", f.col(\"posteriors\").getItem(2))\n                .withColumn(\"coloc_h3\", f.col(\"posteriors\").getItem(3))\n                .withColumn(\"coloc_h4\", f.col(\"posteriors\").getItem(4))\n                .withColumn(\"coloc_h4_h3\", f.col(\"coloc_h4\") / f.col(\"coloc_h3\"))\n                .withColumn(\"coloc_log2_h4_h3\", f.log2(f.col(\"coloc_h4_h3\")))\n                # clean up\n                .drop(\n                    \"posteriors\",\n                    \"allABF\",\n                    \"coloc_h4_h3\",\n                    \"lH0abf\",\n                    \"lH1abf\",\n                    \"lH2abf\",\n                    \"lH3abf\",\n                    \"lH4abf\",\n                )\n                .withColumn(\"colocalisationMethod\", f.lit(\"COLOC\"))\n            )\n        )\n
"},{"location":"components/method/coloc/#otg.method.colocalisation.Coloc.colocalise","title":"colocalise(overlapping_signals, priorc1=0.0001, priorc2=0.0001, priorc12=1e-05) classmethod","text":"

Calculate bayesian colocalisation based on overlapping signals.

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| overlapping_signals | StudyLocusOverlap | overlapping peaks | required |
| priorc1 | float | Prior on variant being causal for trait 1. Defaults to 1e-4. | 0.0001 |
| priorc2 | float | Prior on variant being causal for trait 2. Defaults to 1e-4. | 0.0001 |
| priorc12 | float | Prior on variant being causal for traits 1 and 2. Defaults to 1e-5. | 1e-05 |

Returns:

| Name | Type | Description |
| --- | --- | --- |
| Colocalisation | Colocalisation | Colocalisation results |
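
A hedged usage sketch (assuming `overlaps` is a StudyLocusOverlap carrying the left_logABF and right_logABF columns, and that the resulting Colocalisation dataset exposes its dataframe as `.df`, as the other datasets in the pipeline do):

```python
coloc_results = Coloc.colocalise(overlaps, priorc1=1e-4, priorc2=1e-4, priorc12=1e-5)
coloc_results.df.select(
    "left_studyLocusId", "right_studyLocusId", "coloc_h3", "coloc_h4"
).show()
```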

Source code in src/otg/method/colocalisation.py
@classmethod\ndef colocalise(\n    cls: type[Coloc],\n    overlapping_signals: StudyLocusOverlap,\n    priorc1: float = 1e-4,\n    priorc2: float = 1e-4,\n    priorc12: float = 1e-5,\n) -> Colocalisation:\n\"\"\"Calculate bayesian colocalisation based on overlapping signals.\n\n    Args:\n        overlapping_signals (StudyLocusOverlap): overlapping peaks\n        priorc1 (float): Prior on variant being causal for trait 1. Defaults to 1e-4.\n        priorc2 (float): Prior on variant being causal for trait 2. Defaults to 1e-4.\n        priorc12 (float): Prior on variant being causal for traits 1 and 2. Defaults to 1e-5.\n\n    Returns:\n        Colocalisation: Colocalisation results\n    \"\"\"\n    # register udfs\n    logsum = f.udf(Coloc._get_logsum, DoubleType())\n    posteriors = f.udf(Coloc._get_posteriors, VectorUDT())\n    return Colocalisation(\n        _df=(\n            overlapping_signals.df\n            # Before summing log_abf columns nulls need to be filled with 0:\n            .fillna(0, subset=[\"left_logABF\", \"right_logABF\"])\n            # Sum of log_abfs for each pair of signals\n            .withColumn(\"sum_log_abf\", f.col(\"left_logABF\") + f.col(\"right_logABF\"))\n            # Group by overlapping peak and generating dense vectors of log_abf:\n            .groupBy(\"chromosome\", \"left_studyLocusId\", \"right_studyLocusId\")\n            .agg(\n                f.count(\"*\").alias(\"coloc_n_vars\"),\n                fml.array_to_vector(f.collect_list(f.col(\"left_logABF\"))).alias(\n                    \"left_logABF\"\n                ),\n                fml.array_to_vector(f.collect_list(f.col(\"right_logABF\"))).alias(\n                    \"right_logABF\"\n                ),\n                fml.array_to_vector(f.collect_list(f.col(\"sum_log_abf\"))).alias(\n                    \"sum_log_abf\"\n                ),\n            )\n            .withColumn(\"logsum1\", logsum(f.col(\"left_logABF\")))\n            .withColumn(\"logsum2\", logsum(f.col(\"right_logABF\")))\n            .withColumn(\"logsum12\", logsum(f.col(\"sum_log_abf\")))\n            .drop(\"left_logABF\", \"right_logABF\", \"sum_log_abf\")\n            # Add priors\n            # priorc1 Prior on variant being causal for trait 1\n            .withColumn(\"priorc1\", f.lit(priorc1))\n            # priorc2 Prior on variant being causal for trait 2\n            .withColumn(\"priorc2\", f.lit(priorc2))\n            # priorc12 Prior on variant being causal for traits 1 and 2\n            .withColumn(\"priorc12\", f.lit(priorc12))\n            # h0-h2\n            .withColumn(\"lH0abf\", f.lit(0))\n            .withColumn(\"lH1abf\", f.log(f.col(\"priorc1\")) + f.col(\"logsum1\"))\n            .withColumn(\"lH2abf\", f.log(f.col(\"priorc2\")) + f.col(\"logsum2\"))\n            # h3\n            .withColumn(\"sumlogsum\", f.col(\"logsum1\") + f.col(\"logsum2\"))\n            # exclude null H3/H4s: due to sumlogsum == logsum12\n            .filter(f.col(\"sumlogsum\") != f.col(\"logsum12\"))\n            .withColumn(\"max\", f.greatest(\"sumlogsum\", \"logsum12\"))\n            .withColumn(\n                \"logdiff\",\n                (\n                    f.col(\"max\")\n                    + f.log(\n                        f.exp(f.col(\"sumlogsum\") - f.col(\"max\"))\n                        - f.exp(f.col(\"logsum12\") - f.col(\"max\"))\n                    )\n                ),\n            )\n            .withColumn(\n                \"lH3abf\",\n                f.log(f.col(\"priorc1\"))\n     
           + f.log(f.col(\"priorc2\"))\n                + f.col(\"logdiff\"),\n            )\n            .drop(\"right_logsum\", \"left_logsum\", \"sumlogsum\", \"max\", \"logdiff\")\n            # h4\n            .withColumn(\"lH4abf\", f.log(f.col(\"priorc12\")) + f.col(\"logsum12\"))\n            # cleaning\n            .drop(\n                \"priorc1\", \"priorc2\", \"priorc12\", \"logsum1\", \"logsum2\", \"logsum12\"\n            )\n            # posteriors\n            .withColumn(\n                \"allABF\",\n                fml.array_to_vector(\n                    f.array(\n                        f.col(\"lH0abf\"),\n                        f.col(\"lH1abf\"),\n                        f.col(\"lH2abf\"),\n                        f.col(\"lH3abf\"),\n                        f.col(\"lH4abf\"),\n                    )\n                ),\n            )\n            .withColumn(\n                \"posteriors\", fml.vector_to_array(posteriors(f.col(\"allABF\")))\n            )\n            .withColumn(\"coloc_h0\", f.col(\"posteriors\").getItem(0))\n            .withColumn(\"coloc_h1\", f.col(\"posteriors\").getItem(1))\n            .withColumn(\"coloc_h2\", f.col(\"posteriors\").getItem(2))\n            .withColumn(\"coloc_h3\", f.col(\"posteriors\").getItem(3))\n            .withColumn(\"coloc_h4\", f.col(\"posteriors\").getItem(4))\n            .withColumn(\"coloc_h4_h3\", f.col(\"coloc_h4\") / f.col(\"coloc_h3\"))\n            .withColumn(\"coloc_log2_h4_h3\", f.log2(f.col(\"coloc_h4_h3\")))\n            # clean up\n            .drop(\n                \"posteriors\",\n                \"allABF\",\n                \"coloc_h4_h3\",\n                \"lH0abf\",\n                \"lH1abf\",\n                \"lH2abf\",\n                \"lH3abf\",\n                \"lH4abf\",\n            )\n            .withColumn(\"colocalisationMethod\", f.lit(\"COLOC\"))\n        )\n    )\n
"},{"location":"components/method/ecaviar/","title":"eCAVIAR","text":"

ECaviar-based colocalisation analysis.

It extends the CAVIAR framework to explicitly estimate the posterior probability that the same variant is causal in two studies while accounting for the uncertainty of LD. eCAVIAR computes the colocalization posterior probability (CLPP) by utilizing the marginal posterior probabilities. This framework allows for multiple variants to be causal in a single locus.
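
Per overlapping variant, the CLPP is simply the product of the two posterior probabilities; the locus-level CLPP then sums these products over the shared variants, mirroring the aggregation in `ECaviar.colocalise`. A tiny worked example with made-up posteriors:

```python
# Made-up per-variant posterior probabilities for the two overlapping credible sets:
left_pp = [0.6, 0.3, 0.1]
right_pp = [0.5, 0.4, 0.1]

# Per-variant CLPP is the product; the locus pair's CLPP is their sum.
clpp = sum(l * r for l, r in zip(left_pp, right_pp))
print(round(clpp, 2))  # 0.43
```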

Source code in src/otg/method/colocalisation.py
class ECaviar:\n\"\"\"ECaviar-based colocalisation analysis.\n\n    It extends [CAVIAR](https://www.ncbi.nlm.nih.gov/pmc/articles/PMC5142122/#bib18)\u00a0framework to explicitly estimate the posterior probability that the same variant is causal in 2 studies while accounting for the uncertainty of LD. eCAVIAR computes the colocalization posterior probability (**CLPP**) by utilizing the marginal posterior probabilities. This framework allows for **multiple variants to be causal** in a single locus.\n    \"\"\"\n\n    @staticmethod\n    def _get_clpp(left_pp: Column, right_pp: Column) -> Column:\n\"\"\"Calculate the colocalisation posterior probability (CLPP).\n\n        If the fact that the same variant is found causal for two studies are independent events,\n        CLPP is defined as the product of posterior porbabilities that a variant is causal in both studies.\n\n        Args:\n            left_pp (Column): left posterior probability\n            right_pp (Column): right posterior probability\n\n        Returns:\n            Column: CLPP\n\n        Examples:\n            >>> d = [{\"left_pp\": 0.5, \"right_pp\": 0.5}, {\"left_pp\": 0.25, \"right_pp\": 0.75}]\n            >>> df = spark.createDataFrame(d)\n            >>> df.withColumn(\"clpp\", ECaviar._get_clpp(f.col(\"left_pp\"), f.col(\"right_pp\"))).show()\n            +-------+--------+------+\n            |left_pp|right_pp|  clpp|\n            +-------+--------+------+\n            |    0.5|     0.5|  0.25|\n            |   0.25|    0.75|0.1875|\n            +-------+--------+------+\n            <BLANKLINE>\n\n        \"\"\"\n        return left_pp * right_pp\n\n    @classmethod\n    def colocalise(\n        cls: type[ECaviar], overlapping_signals: StudyLocusOverlap\n    ) -> Colocalisation:\n\"\"\"Calculate bayesian colocalisation based on overlapping signals.\n\n        Args:\n            overlapping_signals (StudyLocusOverlap): overlapping signals.\n\n        Returns:\n            Colocalisation: colocalisation results based on eCAVIAR.\n        \"\"\"\n        return Colocalisation(\n            _df=(\n                overlapping_signals.df.withColumn(\n                    \"clpp\",\n                    ECaviar._get_clpp(\n                        f.col(\"left_posteriorProbability\"),\n                        f.col(\"right_posteriorProbability\"),\n                    ),\n                )\n                .groupBy(\"left_studyLocusId\", \"right_studyLocusId\", \"chromosome\")\n                .agg(\n                    f.count(\"*\").alias(\"coloc_n_vars\"),\n                    f.sum(f.col(\"clpp\")).alias(\"clpp\"),\n                )\n                .withColumn(\"colocalisationMethod\", f.lit(\"eCAVIAR\"))\n            )\n        )\n
"},{"location":"components/method/ecaviar/#otg.method.colocalisation.ECaviar.colocalise","title":"colocalise(overlapping_signals) classmethod","text":"

Calculate bayesian colocalisation based on overlapping signals.

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| overlapping_signals | StudyLocusOverlap | overlapping signals. | required |

Returns:

| Name | Type | Description |
| --- | --- | --- |
| Colocalisation | Colocalisation | colocalisation results based on eCAVIAR. |
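
A hedged usage sketch (assuming `overlaps` is a StudyLocusOverlap with left_posteriorProbability and right_posteriorProbability columns):

```python
ecaviar_results = ECaviar.colocalise(overlaps)
ecaviar_results.df.select("left_studyLocusId", "right_studyLocusId", "clpp").show()
```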

Source code in src/otg/method/colocalisation.py
@classmethod\ndef colocalise(\n    cls: type[ECaviar], overlapping_signals: StudyLocusOverlap\n) -> Colocalisation:\n\"\"\"Calculate bayesian colocalisation based on overlapping signals.\n\n    Args:\n        overlapping_signals (StudyLocusOverlap): overlapping signals.\n\n    Returns:\n        Colocalisation: colocalisation results based on eCAVIAR.\n    \"\"\"\n    return Colocalisation(\n        _df=(\n            overlapping_signals.df.withColumn(\n                \"clpp\",\n                ECaviar._get_clpp(\n                    f.col(\"left_posteriorProbability\"),\n                    f.col(\"right_posteriorProbability\"),\n                ),\n            )\n            .groupBy(\"left_studyLocusId\", \"right_studyLocusId\", \"chromosome\")\n            .agg(\n                f.count(\"*\").alias(\"coloc_n_vars\"),\n                f.sum(f.col(\"clpp\")).alias(\"clpp\"),\n            )\n            .withColumn(\"colocalisationMethod\", f.lit(\"eCAVIAR\"))\n        )\n    )\n
"},{"location":"components/method/ld_annotator/","title":"LD annotator","text":"

Class to annotate associations with linkage disequilibrium (LD) information from GnomAD.

Source code in src/otg/method/ld.py
class LDAnnotatorGnomad:\n\"\"\"Class to annotate linkage disequilibrium (LD) operations from GnomAD.\"\"\"\n\n    @staticmethod\n    def _query_block_matrix(\n        bm: BlockMatrix,\n        idxs: list[int],\n        starts: list[int],\n        stops: list[int],\n        min_r2: float,\n    ) -> DataFrame:\n\"\"\"Query block matrix for idxs rows sparsified by start/stop columns.\n\n        Args:\n            bm (BlockMatrix): LD matrix containing r values\n            idxs (List[int]): Row indexes to query (distinct and incremental)\n            starts (List[int]): Interval start column indexes (same size as idxs)\n            stops (List[int]): Interval stop column indexes (same size as idxs)\n            min_r2 (float): Minimum r2 to keep\n\n        Returns:\n            DataFrame: i,j,r where i and j are the row and column indexes and r is the LD\n\n        Examples:\n            >>> import numpy as np\n            >>> r = np.array([[1, 0.8, 0.7, 0.2],\n            ...               [0.8, 1, 0.6, 0.1],\n            ...               [0.7, 0.6, 1, 0.3],\n            ...               [0.2, 0.1, 0.3, 1]])\n            >>> bm_r = BlockMatrix.from_numpy(r) # doctest: +SKIP\n            >>> LDAnnotatorGnomad._query_block_matrix(bm_r, [1, 2], [0, 1], [3, 4], 0.5).show() # doctest: +SKIP\n            +---+---+---+\n            |  i|  j|  r|\n            +---+---+---+\n            |  0|  0|0.8|\n            |  0|  1|1.0|\n            |  1|  2|1.0|\n            +---+---+---+\n            <BLANKLINE>\n        \"\"\"\n        bm_sparsified = bm.filter_rows(idxs).sparsify_row_intervals(\n            starts, stops, blocks_only=True\n        )\n        entries = bm_sparsified.entries(keyed=False)\n\n        return (\n            entries.rename({\"entry\": \"r\"})\n            .to_spark()\n            .filter(f.col(\"r\") ** 2 >= min_r2)\n            .withColumn(\"r\", f.when(f.col(\"r\") >= 1, f.lit(1)).otherwise(f.col(\"r\")))\n        )\n\n    @staticmethod\n    def _variant_coordinates_in_ldindex(\n        variants_df: DataFrame,\n        ld_index: LDIndex,\n    ) -> DataFrame:\n\"\"\"Idxs for variants, first variant in the region and last variant in the region in precomputed ld index.\n\n        It checks if the window defined by the start/stop indices is maintained after lifting over the variants.\n\n        Args:\n            variants_df (DataFrame): Lead variants from `_annotate_index_intervals` output\n            ld_index (LDIndex): LD index precomputed\n\n        Returns:\n            DataFrame: LD coordinates [variantId, chromosome, gnomadPopulation, i, idxs, start_idx and stop_idx]\n        \"\"\"\n        w = Window.orderBy(\"chromosome\", \"idx\")\n        return (\n            variants_df.join(\n                ld_index.df,\n                on=[\"variantId\", \"chromosome\"],\n            )\n            .select(\n                \"variantId\",\n                \"chromosome\",\n                \"gnomadPopulation\",\n                \"idx\",\n                \"start_idx\",\n                \"stop_idx\",\n            )\n            .distinct()\n            # necessary to resolve return of .entries() function\n            .withColumn(\"i\", f.row_number().over(w))\n            # the dataframe has to be ordered to query the block matrix\n            .orderBy(\"idx\")\n        )\n\n    @staticmethod\n    def weighted_r_overall(\n        chromosome: Column,\n        study_id: Column,\n        variant_id: Column,\n        tag_variant_id: Column,\n        relative_sample_size: Column,\n      
  r: Column,\n    ) -> Column:\n\"\"\"Aggregation of weighted R information using ancestry proportions.\n\n        The method implements a simple average weighted by the relative population sizes.\n\n        Args:\n            chromosome (Column): Chromosome\n            study_id (Column): Study identifier\n            variant_id (Column): Variant identifier\n            tag_variant_id (Column): Tag variant identifier\n            relative_sample_size (Column): Relative sample size\n            r (Column): Correlation\n\n        Returns:\n            Column: Estimates weighted R information\n\n        Examples:\n            >>> data = [('t3', 0.25, 0.2), ('t3', 0.25, 0.2), ('t3', 0.5, 0.99)]\n            >>> columns = ['tag_variant_id', 'relative_sample_size', 'r']\n            >>> (\n            ...    spark.createDataFrame(data, columns)\n            ...     .withColumn('chr', f.lit('chr1'))\n            ...     .withColumn('study_id', f.lit('s1'))\n            ...     .withColumn('variant_id', f.lit('v1'))\n            ...     .withColumn(\n            ...         'r_overall',\n            ...         LDAnnotatorGnomad.weighted_r_overall(\n            ...             f.col('chr'),\n            ...             f.col('study_id'),\n            ...             f.col('variant_id'),\n            ...             f.col('tag_variant_id'),\n            ...             f.col('relative_sample_size'),\n            ...             f.col('r')\n            ...         )\n            ...     )\n            ...     .show()\n            ... )\n            +--------------+--------------------+----+----+--------+----------+---------+\n            |tag_variant_id|relative_sample_size|   r| chr|study_id|variant_id|r_overall|\n            +--------------+--------------------+----+----+--------+----------+---------+\n            |            t3|                0.25| 0.2|chr1|      s1|        v1|    0.595|\n            |            t3|                0.25| 0.2|chr1|      s1|        v1|    0.595|\n            |            t3|                 0.5|0.99|chr1|      s1|        v1|    0.595|\n            +--------------+--------------------+----+----+--------+----------+---------+\n            <BLANKLINE>\n        \"\"\"\n        pseudo_r = f.when(r >= 1, 0.9999995).otherwise(r)\n        return f.round(\n            f.sum(pseudo_r * relative_sample_size).over(\n                Window.partitionBy(chromosome, study_id, variant_id, tag_variant_id)\n            ),\n            6,\n        )\n\n    @staticmethod\n    def _flag_partial_mapped(\n        study_id: Column, variant_id: Column, tag_variant_id: Column\n    ) -> Column:\n\"\"\"Generate flag for lead/tag pairs.\n\n        Some lead variants can be resolved in one population but not in other. Those rows interfere with PICS calculation, so they needs to be dropped.\n\n        Args:\n            study_id (Column): Study identifier column\n            variant_id (Column): Identifier of the lead variant\n            tag_variant_id (Column): Identifier of the tag variant\n\n        Returns:\n            Column: Boolean\n\n        Examples:\n            >>> data = [\n            ...     ('study_1', 'lead_1', 'tag_1'),  # <- keep row as tag available.\n            ...     ('study_1', 'lead_1', 'tag_2'),  # <- keep row as tag available.\n            ...     ('study_1', 'lead_2', 'tag_3'),  # <- keep row as tag available\n            ...     ('study_1', 'lead_2', None),  # <- drop row as lead 2 is resolved.\n            ...     
('study_1', 'lead_3', None)   # <- keep row as lead 3 is not resolved.\n            ... ]\n            >>> (\n            ...     spark.createDataFrame(data, ['studyId', 'variantId', 'tagVariantId'])\n            ...     .withColumn(\"flag_to_keep_tag\", LDAnnotatorGnomad._flag_partial_mapped(f.col('studyId'), f.col('variantId'), f.col('tagVariantId')))\n            ...     .show()\n            ... )\n            +-------+---------+------------+----------------+\n            |studyId|variantId|tagVariantId|flag_to_keep_tag|\n            +-------+---------+------------+----------------+\n            |study_1|   lead_1|       tag_1|            true|\n            |study_1|   lead_1|       tag_2|            true|\n            |study_1|   lead_2|       tag_3|            true|\n            |study_1|   lead_2|        null|           false|\n            |study_1|   lead_3|        null|            true|\n            +-------+---------+------------+----------------+\n            <BLANKLINE>\n        \"\"\"\n        return tag_variant_id.isNotNull() | ~f.array_contains(\n            f.collect_set(tag_variant_id.isNotNull()).over(\n                Window.partitionBy(study_id, variant_id)\n            ),\n            True,\n        )\n\n    @staticmethod\n    def get_ld_annotated_assocs_for_population(\n        population: str,\n        ld_index: LDIndex,\n        ld_matrix: BlockMatrix,\n        locus_ancestry: DataFrame,\n        min_r2: float,\n    ) -> DataFrame:\n\"\"\"This function annotates association data with LD information.\"\"\"\n        # map variants to precomputed LD indexes from gnomAD\n        variants_in_pop = locus_ancestry.filter(f.col(\"gnomadPopulation\") == population)\n        variants_ld_coordinates = LDAnnotatorGnomad._variant_coordinates_in_ldindex(\n            variants_in_pop, ld_index\n        ).persist()\n\n        # idxs for lead, first variant in the region and last variant in the region\n        variants_ld_scores = LDAnnotatorGnomad._query_block_matrix(\n            ld_matrix + ld_matrix.T,\n            variants_ld_coordinates.rdd.map(lambda x: x.idx).collect(),\n            variants_ld_coordinates.rdd.map(lambda x: x.start_idx).collect(),\n            variants_ld_coordinates.rdd.map(lambda x: x.stop_idx).collect(),\n            min_r2,\n        )\n\n        # aggregate LD info\n        variants_ld_info = variants_ld_scores.join(\n            f.broadcast(variants_ld_coordinates),\n            on=\"i\",\n            how=\"inner\",\n        ).select(\"variantId\", \"chromosome\", \"gnomadPopulation\", \"j\", \"r\")\n\n        variants_ld_coordinates.unpersist()\n        return LDAnnotatorGnomad.variants_in_ld_in_gnomad_pop(\n            variants_ld_info=variants_ld_info,\n            ld_index=ld_index,\n        )\n\n    @classmethod\n    def variants_in_ld_in_gnomad_pop(\n        cls: type[LDAnnotatorGnomad],\n        variants_ld_info: DataFrame,\n        ld_index: LDIndex,\n    ) -> DataFrame:\n\"\"\"Return LD annotation for variants in specific gnomad population.\n\n        Args:\n            variants_ld_info (DataFrame): variant and their LD scores (r) and coordinates from the LD matrix of a population\n            ld_index (LDIndex): LD index precomputed\n\n        Returns:\n            DataFrame: LD information in the columns [\"variantId\", \"chromosome\", \"gnomadPopulation\", \"tagVariantId\", \"r\"]\n        \"\"\"\n        return (\n            variants_ld_info.alias(\"left\")\n            .join(\n                ld_index.df.select(\n                    
f.col(\"chromosome\"),\n                    f.col(\"variantId\").alias(\"tagVariantId\"),\n                    f.col(\"idx\").alias(\"tag_idx\"),\n                ).alias(\"tags\"),\n                on=[\n                    f.col(\"left.chromosome\") == f.col(\"tags.chromosome\"),\n                    f.col(\"left.j\") == f.col(\"tags.tag_idx\"),\n                ],\n            )\n            .select(\n                \"variantId\", \"left.chromosome\", \"gnomadPopulation\", \"tagVariantId\", \"r\"\n            )\n        )\n\n    @classmethod\n    def ld_annotation_by_locus_ancestry(\n        cls: type[LDAnnotatorGnomad],\n        session: Session,\n        associations: StudyLocusGWASCatalog,\n        studies: StudyIndexGWASCatalog,\n        ld_populations: list[str],\n        ld_index_template: str,\n        ld_matrix_template: str,\n        min_r2: float,\n    ) -> DataFrame:\n\"\"\"LD information for all locus and ancestries.\n\n        Args:\n            session (Session): Session\n            associations (StudyLocusGWASCatalog): GWAS associations\n            studies (StudyIndexGWASCatalog): study metadata of the associations\n            ld_populations (list[str]): List of populations to annotate\n            ld_index_template (str): Template path of the LD matrix index containing `{POP}` where the population is expected\n            ld_matrix_template (str): Template path of the LD matrix containing `{POP}` where the population is expected\n            min_r2 (float): minimum r2 to keep\n\n        Returns:\n            DataFrame: LD annotation [\"variantId\", \"chromosome\", \"gnomadPopulation\", \"tagVariantId\", \"r\"]\n        \"\"\"\n        # Unique lead - population pairs:\n        locus_ancestry = (\n            associations.unique_study_locus_ancestries(studies)\n            # Ignoring study information / relativeSampleSize to get unique lead-ancestry pairs\n            .drop(\"studyId\", \"relativeSampleSize\")\n            .distinct()\n            .persist()\n        )\n\n        # All gnomad populations captured in associations:\n        assoc_populations = locus_ancestry.rdd.map(\n            lambda x: x.gnomadPopulation\n        ).collect()\n\n        # Retrieve LD information from gnomAD\n        ld_annotated_assocs = []\n        for population in ld_populations:\n            if population in assoc_populations:\n                pop_parsed_ldindex_path = ld_index_template.format(POP=population)\n                pop_matrix_path = ld_matrix_template.format(POP=population)\n                ld_index = LDIndex.from_parquet(session, pop_parsed_ldindex_path)\n                ld_matrix = BlockMatrix.read(pop_matrix_path)\n                ld_annotated_assocs.append(\n                    LDAnnotatorGnomad.get_ld_annotated_assocs_for_population(\n                        population,\n                        ld_index,\n                        ld_matrix,\n                        locus_ancestry,\n                        min_r2,\n                    ).coalesce(400)\n                )\n        return reduce(DataFrame.unionByName, ld_annotated_assocs)\n
"},{"location":"components/method/ld_annotator/#otg.method.ld.LDAnnotatorGnomad.get_ld_annotated_assocs_for_population","title":"get_ld_annotated_assocs_for_population(population, ld_index, ld_matrix, locus_ancestry, min_r2) staticmethod","text":"

This function annotates association data with LD information.

Source code in src/otg/method/ld.py
@staticmethod\ndef get_ld_annotated_assocs_for_population(\n    population: str,\n    ld_index: LDIndex,\n    ld_matrix: BlockMatrix,\n    locus_ancestry: DataFrame,\n    min_r2: float,\n) -> DataFrame:\n\"\"\"This function annotates association data with LD information.\"\"\"\n    # map variants to precomputed LD indexes from gnomAD\n    variants_in_pop = locus_ancestry.filter(f.col(\"gnomadPopulation\") == population)\n    variants_ld_coordinates = LDAnnotatorGnomad._variant_coordinates_in_ldindex(\n        variants_in_pop, ld_index\n    ).persist()\n\n    # idxs for lead, first variant in the region and last variant in the region\n    variants_ld_scores = LDAnnotatorGnomad._query_block_matrix(\n        ld_matrix + ld_matrix.T,\n        variants_ld_coordinates.rdd.map(lambda x: x.idx).collect(),\n        variants_ld_coordinates.rdd.map(lambda x: x.start_idx).collect(),\n        variants_ld_coordinates.rdd.map(lambda x: x.stop_idx).collect(),\n        min_r2,\n    )\n\n    # aggregate LD info\n    variants_ld_info = variants_ld_scores.join(\n        f.broadcast(variants_ld_coordinates),\n        on=\"i\",\n        how=\"inner\",\n    ).select(\"variantId\", \"chromosome\", \"gnomadPopulation\", \"j\", \"r\")\n\n    variants_ld_coordinates.unpersist()\n    return LDAnnotatorGnomad.variants_in_ld_in_gnomad_pop(\n        variants_ld_info=variants_ld_info,\n        ld_index=ld_index,\n    )\n
"},{"location":"components/method/ld_annotator/#otg.method.ld.LDAnnotatorGnomad.ld_annotation_by_locus_ancestry","title":"ld_annotation_by_locus_ancestry(session, associations, studies, ld_populations, ld_index_template, ld_matrix_template, min_r2) classmethod","text":"

LD information for all loci and ancestries.

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| session | Session | Session | required |
| associations | StudyLocusGWASCatalog | GWAS associations | required |
| studies | StudyIndexGWASCatalog | study metadata of the associations | required |
| ld_populations | list[str] | List of populations to annotate | required |
| ld_index_template | str | Template path of the LD matrix index containing {POP} where the population is expected | required |
| ld_matrix_template | str | Template path of the LD matrix containing {POP} where the population is expected | required |
| min_r2 | float | minimum r2 to keep | required |

Returns:

| Name | Type | Description |
| --- | --- | --- |
| DataFrame | DataFrame | LD annotation ["variantId", "chromosome", "gnomadPopulation", "tagVariantId", "r"] |
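
A hedged usage sketch; the session object, datasets, paths and population codes below are illustrative placeholders, not pipeline defaults:

```python
ld_info = LDAnnotatorGnomad.ld_annotation_by_locus_ancestry(
    session=session,                         # otg Session wrapping the Spark session
    associations=gwas_catalog_associations,  # StudyLocusGWASCatalog
    studies=gwas_catalog_studies,            # StudyIndexGWASCatalog
    ld_populations=["nfe", "afr"],           # gnomAD populations to annotate
    ld_index_template="gs://bucket/ld_index_{POP}.parquet",
    ld_matrix_template="gs://bucket/ld_matrix_{POP}.bm",
    min_r2=0.5,
)
ld_info.select("variantId", "chromosome", "gnomadPopulation", "tagVariantId", "r").show()
```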

Source code in src/otg/method/ld.py
@classmethod\ndef ld_annotation_by_locus_ancestry(\n    cls: type[LDAnnotatorGnomad],\n    session: Session,\n    associations: StudyLocusGWASCatalog,\n    studies: StudyIndexGWASCatalog,\n    ld_populations: list[str],\n    ld_index_template: str,\n    ld_matrix_template: str,\n    min_r2: float,\n) -> DataFrame:\n\"\"\"LD information for all locus and ancestries.\n\n    Args:\n        session (Session): Session\n        associations (StudyLocusGWASCatalog): GWAS associations\n        studies (StudyIndexGWASCatalog): study metadata of the associations\n        ld_populations (list[str]): List of populations to annotate\n        ld_index_template (str): Template path of the LD matrix index containing `{POP}` where the population is expected\n        ld_matrix_template (str): Template path of the LD matrix containing `{POP}` where the population is expected\n        min_r2 (float): minimum r2 to keep\n\n    Returns:\n        DataFrame: LD annotation [\"variantId\", \"chromosome\", \"gnomadPopulation\", \"tagVariantId\", \"r\"]\n    \"\"\"\n    # Unique lead - population pairs:\n    locus_ancestry = (\n        associations.unique_study_locus_ancestries(studies)\n        # Ignoring study information / relativeSampleSize to get unique lead-ancestry pairs\n        .drop(\"studyId\", \"relativeSampleSize\")\n        .distinct()\n        .persist()\n    )\n\n    # All gnomad populations captured in associations:\n    assoc_populations = locus_ancestry.rdd.map(\n        lambda x: x.gnomadPopulation\n    ).collect()\n\n    # Retrieve LD information from gnomAD\n    ld_annotated_assocs = []\n    for population in ld_populations:\n        if population in assoc_populations:\n            pop_parsed_ldindex_path = ld_index_template.format(POP=population)\n            pop_matrix_path = ld_matrix_template.format(POP=population)\n            ld_index = LDIndex.from_parquet(session, pop_parsed_ldindex_path)\n            ld_matrix = BlockMatrix.read(pop_matrix_path)\n            ld_annotated_assocs.append(\n                LDAnnotatorGnomad.get_ld_annotated_assocs_for_population(\n                    population,\n                    ld_index,\n                    ld_matrix,\n                    locus_ancestry,\n                    min_r2,\n                ).coalesce(400)\n            )\n    return reduce(DataFrame.unionByName, ld_annotated_assocs)\n
"},{"location":"components/method/ld_annotator/#otg.method.ld.LDAnnotatorGnomad.variants_in_ld_in_gnomad_pop","title":"variants_in_ld_in_gnomad_pop(variants_ld_info, ld_index) classmethod","text":"

Return LD annotation for variants in a specific gnomAD population.

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| variants_ld_info | DataFrame | variant and their LD scores (r) and coordinates from the LD matrix of a population | required |
| ld_index | LDIndex | LD index precomputed | required |

Returns:

| Name | Type | Description |
| --- | --- | --- |
| DataFrame | DataFrame | LD information in the columns ["variantId", "chromosome", "gnomadPopulation", "tagVariantId", "r"] |

Source code in src/otg/method/ld.py
@classmethod\ndef variants_in_ld_in_gnomad_pop(\n    cls: type[LDAnnotatorGnomad],\n    variants_ld_info: DataFrame,\n    ld_index: LDIndex,\n) -> DataFrame:\n\"\"\"Return LD annotation for variants in specific gnomad population.\n\n    Args:\n        variants_ld_info (DataFrame): variant and their LD scores (r) and coordinates from the LD matrix of a population\n        ld_index (LDIndex): LD index precomputed\n\n    Returns:\n        DataFrame: LD information in the columns [\"variantId\", \"chromosome\", \"gnomadPopulation\", \"tagVariantId\", \"r\"]\n    \"\"\"\n    return (\n        variants_ld_info.alias(\"left\")\n        .join(\n            ld_index.df.select(\n                f.col(\"chromosome\"),\n                f.col(\"variantId\").alias(\"tagVariantId\"),\n                f.col(\"idx\").alias(\"tag_idx\"),\n            ).alias(\"tags\"),\n            on=[\n                f.col(\"left.chromosome\") == f.col(\"tags.chromosome\"),\n                f.col(\"left.j\") == f.col(\"tags.tag_idx\"),\n            ],\n        )\n        .select(\n            \"variantId\", \"left.chromosome\", \"gnomadPopulation\", \"tagVariantId\", \"r\"\n        )\n    )\n
"},{"location":"components/method/ld_annotator/#otg.method.ld.LDAnnotatorGnomad.weighted_r_overall","title":"weighted_r_overall(chromosome, study_id, variant_id, tag_variant_id, relative_sample_size, r) staticmethod","text":"

Aggregation of weighted R information using ancestry proportions.

The method implements a simple average weighted by the relative population sizes.
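
For example, with the values used in the doctest below (relative sample sizes 0.25, 0.25 and 0.5 with r values 0.2, 0.2 and 0.99 for the same lead/tag pair), the weighted value is 0.25 × 0.2 + 0.25 × 0.2 + 0.5 × 0.99 = 0.595, which is the r_overall reported for every row of that group.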

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| chromosome | Column | Chromosome | required |
| study_id | Column | Study identifier | required |
| variant_id | Column | Variant identifier | required |
| tag_variant_id | Column | Tag variant identifier | required |
| relative_sample_size | Column | Relative sample size | required |
| r | Column | Correlation | required |

Returns:

| Name | Type | Description |
| --- | --- | --- |
| Column | Column | Estimates weighted R information |

Examples:

>>> data = [('t3', 0.25, 0.2), ('t3', 0.25, 0.2), ('t3', 0.5, 0.99)]\n>>> columns = ['tag_variant_id', 'relative_sample_size', 'r']\n>>> (\n...    spark.createDataFrame(data, columns)\n...     .withColumn('chr', f.lit('chr1'))\n...     .withColumn('study_id', f.lit('s1'))\n...     .withColumn('variant_id', f.lit('v1'))\n...     .withColumn(\n...         'r_overall',\n...         LDAnnotatorGnomad.weighted_r_overall(\n...             f.col('chr'),\n...             f.col('study_id'),\n...             f.col('variant_id'),\n...             f.col('tag_variant_id'),\n...             f.col('relative_sample_size'),\n...             f.col('r')\n...         )\n...     )\n...     .show()\n... )\n+--------------+--------------------+----+----+--------+----------+---------+\n|tag_variant_id|relative_sample_size|   r| chr|study_id|variant_id|r_overall|\n+--------------+--------------------+----+----+--------+----------+---------+\n|            t3|                0.25| 0.2|chr1|      s1|        v1|    0.595|\n|            t3|                0.25| 0.2|chr1|      s1|        v1|    0.595|\n|            t3|                 0.5|0.99|chr1|      s1|        v1|    0.595|\n+--------------+--------------------+----+----+--------+----------+---------+\n
Source code in src/otg/method/ld.py
@staticmethod\ndef weighted_r_overall(\n    chromosome: Column,\n    study_id: Column,\n    variant_id: Column,\n    tag_variant_id: Column,\n    relative_sample_size: Column,\n    r: Column,\n) -> Column:\n\"\"\"Aggregation of weighted R information using ancestry proportions.\n\n    The method implements a simple average weighted by the relative population sizes.\n\n    Args:\n        chromosome (Column): Chromosome\n        study_id (Column): Study identifier\n        variant_id (Column): Variant identifier\n        tag_variant_id (Column): Tag variant identifier\n        relative_sample_size (Column): Relative sample size\n        r (Column): Correlation\n\n    Returns:\n        Column: Estimates weighted R information\n\n    Examples:\n        >>> data = [('t3', 0.25, 0.2), ('t3', 0.25, 0.2), ('t3', 0.5, 0.99)]\n        >>> columns = ['tag_variant_id', 'relative_sample_size', 'r']\n        >>> (\n        ...    spark.createDataFrame(data, columns)\n        ...     .withColumn('chr', f.lit('chr1'))\n        ...     .withColumn('study_id', f.lit('s1'))\n        ...     .withColumn('variant_id', f.lit('v1'))\n        ...     .withColumn(\n        ...         'r_overall',\n        ...         LDAnnotatorGnomad.weighted_r_overall(\n        ...             f.col('chr'),\n        ...             f.col('study_id'),\n        ...             f.col('variant_id'),\n        ...             f.col('tag_variant_id'),\n        ...             f.col('relative_sample_size'),\n        ...             f.col('r')\n        ...         )\n        ...     )\n        ...     .show()\n        ... )\n        +--------------+--------------------+----+----+--------+----------+---------+\n        |tag_variant_id|relative_sample_size|   r| chr|study_id|variant_id|r_overall|\n        +--------------+--------------------+----+----+--------+----------+---------+\n        |            t3|                0.25| 0.2|chr1|      s1|        v1|    0.595|\n        |            t3|                0.25| 0.2|chr1|      s1|        v1|    0.595|\n        |            t3|                 0.5|0.99|chr1|      s1|        v1|    0.595|\n        +--------------+--------------------+----+----+--------+----------+---------+\n        <BLANKLINE>\n    \"\"\"\n    pseudo_r = f.when(r >= 1, 0.9999995).otherwise(r)\n    return f.round(\n        f.sum(pseudo_r * relative_sample_size).over(\n            Window.partitionBy(chromosome, study_id, variant_id, tag_variant_id)\n        ),\n        6,\n    )\n
"},{"location":"components/method/pics/","title":"PICS","text":"

Probabilistic Identification of Causal SNPs (PICS), an algorithm estimating the probability that an individual variant is causal considering the haplotype structure and observed pattern of association at the genetic locus.
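
A minimal sketch of the per-tag statistics, following the formulas in PICS._pics_mu, PICS._pics_standard_deviation and PICS._pics_relative_posterior_probability (the p-value and r2 values below are made up for illustration):

```python
from scipy.stats import norm

neglog_p = 10.0  # -log10 p-value of the lead variant
k = 6.4          # empiric constant recommended for the method

for r2 in (1.0, 0.9, 0.6):  # LD between a tag and the lead; only r2 >= 0.5 is scored
    mu = neglog_p * r2
    std = (1 - abs(r2) ** 0.5 ** k) ** 0.5 * neglog_p ** 0.5 / 2
    std = 0.001 if std == 0 else std  # same floor as applied in PICS._finemap
    relative_posterior = norm(mu, std).sf(neglog_p) * 2
    print(f"r2={r2}: mu={mu:.2f}, std={std:.3f}, relative posterior={relative_posterior:.3g}")

# The relative posteriors are then rescaled so they sum to 1 across the locus,
# giving the posteriorProbability of each tag in the credible set.
```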

Source code in src/otg/method/pics.py
class PICS:\n\"\"\"Probabilistic Identification of Causal SNPs (PICS), an algorithm estimating the probability that an individual variant is causal considering the haplotype structure and observed pattern of association at the genetic locus.\"\"\"\n\n    @staticmethod\n    def _pics_relative_posterior_probability(\n        neglog_p: float, pics_snp_mu: float, pics_snp_std: float\n    ) -> float:\n\"\"\"Compute the PICS posterior probability for a given SNP.\n\n        !!! info \"This probability needs to be scaled to take into account the probabilities of the other variants in the locus.\"\n\n        Args:\n            neglog_p (float): Negative log p-value of the lead variant\n            pics_snp_mu (float): Mean P value of the association between a SNP and a trait\n            pics_snp_std (float): Standard deviation for the P value of the association between a SNP and a trait\n\n        Returns:\n            Relative posterior probability of a SNP being causal in a locus\n\n        Examples:\n            >>> rel_prob = PICS._pics_relative_posterior_probability(neglog_p=10.0, pics_snp_mu=1.0, pics_snp_std=10.0)\n            >>> round(rel_prob, 3)\n            0.368\n        \"\"\"\n        return float(norm(pics_snp_mu, pics_snp_std).sf(neglog_p) * 2)\n\n    @staticmethod\n    def _pics_standard_deviation(neglog_p: float, r2: float, k: float) -> float | None:\n\"\"\"Compute the PICS standard deviation.\n\n        This distribution is obtained after a series of permutation tests described in the PICS method, and it is only\n        valid when the SNP is highly linked with the lead (r2 > 0.5).\n\n        Args:\n            neglog_p (float): Negative log p-value of the lead variant\n            r2 (float): LD score between a given SNP and the lead variant\n            k (float): Empiric constant that can be adjusted to fit the curve, 6.4 recommended.\n\n        Returns:\n            Standard deviation for the P value of the association between a SNP and a trait\n\n        Examples:\n            >>> PICS._pics_standard_deviation(neglog_p=1.0, r2=1.0, k=6.4)\n            0.0\n            >>> round(PICS._pics_standard_deviation(neglog_p=10.0, r2=0.5, k=6.4), 3)\n            0.143\n            >>> print(PICS._pics_standard_deviation(neglog_p=1.0, r2=0.0, k=6.4))\n            None\n        \"\"\"\n        return (\n            (1 - abs(r2) ** 0.5**k) ** 0.5 * (neglog_p) ** 0.5 / 2\n            if r2 >= 0.5\n            else None\n        )\n\n    @staticmethod\n    def _pics_mu(neglog_p: float, r2: float) -> float | None:\n\"\"\"Compute the PICS mu that estimates the probability of association between a given SNP and the trait.\n\n        This distribution is obtained after a series of permutation tests described in the PICS method, and it is only\n        valid when the SNP is highly linked with the lead (r2 > 0.5).\n\n        Args:\n            neglog_p (float): Negative log p-value of the lead variant\n            r2 (float): LD score between a given SNP and the lead variant\n\n        Returns:\n            Mean P value of the association between a SNP and a trait\n\n        Examples:\n            >>> PICS._pics_mu(neglog_p=1.0, r2=1.0)\n            1.0\n            >>> PICS._pics_mu(neglog_p=10.0, r2=0.5)\n            5.0\n            >>> print(PICS._pics_mu(neglog_p=10.0, r2=0.3))\n            None\n        \"\"\"\n        return neglog_p * r2 if r2 >= 0.5 else None\n\n    @staticmethod\n    def _finemap(\n        credible_set: list[Row], lead_neglog_p: float, k: float\n    ) -> list | 
None:\n\"\"\"Calculates the probability of a variant being causal in a study-locus context by applying the PICS method.\n\n        It is intended to be applied as an UDF in `PICS.finemap`, where each row is a StudyLocus association.\n        The function iterates over every SNP in the `credibleSet` array, and it returns an updated credibleSet with\n        its association signal and causality probability as of PICS.\n\n        Args:\n            credible_set (list): list of tagging variants after expanding the locus\n            lead_neglog_p (float): P value of the association signal between the lead variant and the study in the form of -log10.\n            k (float): Empiric constant that can be adjusted to fit the curve, 6.4 recommended.\n\n        Returns:\n            List of tagging variants with an estimation of the association signal and their posterior probability as of PICS.\n        \"\"\"\n        if credible_set is None:\n            return None\n        elif not credible_set:\n            return []\n\n        tmp_credible_set = []\n        new_credible_set = []\n        # First iteration: calculation of mu, standard deviation, and the relative posterior probability\n        for tag_struct in credible_set:\n            tag_dict = (\n                tag_struct.asDict()\n            )  # tag_struct is of type pyspark.Row, we'll represent it as a dict\n            if (\n                not tag_dict[\"r2Overall\"]\n                or tag_dict[\"r2Overall\"] < 0.5\n                or not lead_neglog_p\n            ):\n                # If PICS cannot be calculated, we'll return the original credible set\n                new_credible_set.append(tag_dict)\n                continue\n            pics_snp_mu = PICS._pics_mu(lead_neglog_p, tag_dict[\"r2Overall\"])\n            pics_snp_std = PICS._pics_standard_deviation(\n                lead_neglog_p, tag_dict[\"r2Overall\"], k\n            )\n            pics_snp_std = 0.001 if pics_snp_std == 0 else pics_snp_std\n            if pics_snp_mu is not None and pics_snp_std is not None:\n                posterior_probability = PICS._pics_relative_posterior_probability(\n                    lead_neglog_p, pics_snp_mu, pics_snp_std\n                )\n                tag_dict[\"tagPValue\"] = 10**-pics_snp_mu\n                tag_dict[\"tagStandardError\"] = 10**-pics_snp_std\n                tag_dict[\"relativePosteriorProbability\"] = posterior_probability\n\n                tmp_credible_set.append(tag_dict)\n\n        # Second iteration: calculation of the sum of all the posteriors in each study-locus, so that we scale them between 0-1\n        total_posteriors = sum(\n            tag_dict.get(\"relativePosteriorProbability\", 0)\n            for tag_dict in tmp_credible_set\n        )\n\n        # Third iteration: calculation of the final posteriorProbability\n        for tag_dict in tmp_credible_set:\n            if total_posteriors != 0:\n                tag_dict[\"posteriorProbability\"] = float(\n                    tag_dict.get(\"relativePosteriorProbability\", 0) / total_posteriors\n                )\n            tag_dict.pop(\"relativePosteriorProbability\")\n            new_credible_set.append(tag_dict)\n        return new_credible_set\n\n    @classmethod\n    def finemap(\n        cls: type[PICS], associations: StudyLocus, k: float = 6.4\n    ) -> StudyLocus:\n\"\"\"Run PICS on a study locus.\n\n        !!! 
info \"Study locus needs to be LD annotated\"\n            The study locus needs to be LD annotated before PICS can be calculated.\n\n        Args:\n            associations (StudyLocus): Study locus to finemap using PICS\n            k (float): Empiric constant that can be adjusted to fit the curve, 6.4 recommended.\n\n        Returns:\n            StudyLocus: Study locus with PICS results\n        \"\"\"\n        # Register UDF by defining the structure of the output credibleSet array of structs\n        credset_schema = t.ArrayType(\n            [field.dataType.elementType for field in associations.schema if field.name == \"credibleSet\"][0]  # type: ignore\n        )\n        _finemap_udf = f.udf(\n            lambda credible_set, neglog_p: PICS._finemap(credible_set, neglog_p, k),\n            credset_schema,\n        )\n\n        associations.df = (\n            associations.df.withColumn(\"neglog_pvalue\", associations.neglog_pvalue())\n            .withColumn(\n                \"credibleSet\",\n                f.when(\n                    f.col(\"credibleSet\").isNotNull(),\n                    _finemap_udf(f.col(\"credibleSet\"), f.col(\"neglog_pvalue\")),\n                ),\n            )\n            .drop(\"neglog_pvalue\")\n        )\n        return associations\n
"},{"location":"components/method/pics/#otg.method.pics.PICS.finemap","title":"finemap(associations, k=6.4) classmethod","text":"

Run PICS on a study locus.

Study locus needs to be LD annotated

The study locus needs to be LD annotated before PICS can be calculated.

Parameters:

associations (StudyLocus): Study locus to finemap using PICS. Required.

k (float): Empiric constant that can be adjusted to fit the curve, 6.4 recommended. Default: 6.4.

Returns:

StudyLocus: Study locus with PICS results.
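
A minimal usage sketch (not part of the documented source): it assumes an otg Session named session already exists and that the input StudyLocus has been LD annotated; the parquet path is hypothetical.

# Hedged usage sketch; `session`, the StudyLocus import and the input path are assumptions.
from otg.method.pics import PICS

# Load an LD-annotated StudyLocus dataset (path is a placeholder).
study_locus = StudyLocus.from_parquet(session, "gs://my-bucket/ld_annotated_study_locus")

# Fine-map every locus with the recommended empiric constant k = 6.4.
finemapped = PICS.finemap(study_locus, k=6.4)

# The credibleSet column now carries tagPValue, tagStandardError and posteriorProbability per tag variant.
finemapped.df.select("studyLocusId", "credibleSet").show(truncate=False)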

Source code in src/otg/method/pics.py
@classmethod\ndef finemap(\n    cls: type[PICS], associations: StudyLocus, k: float = 6.4\n) -> StudyLocus:\n\"\"\"Run PICS on a study locus.\n\n    !!! info \"Study locus needs to be LD annotated\"\n        The study locus needs to be LD annotated before PICS can be calculated.\n\n    Args:\n        associations (StudyLocus): Study locus to finemap using PICS\n        k (float): Empiric constant that can be adjusted to fit the curve, 6.4 recommended.\n\n    Returns:\n        StudyLocus: Study locus with PICS results\n    \"\"\"\n    # Register UDF by defining the structure of the output credibleSet array of structs\n    credset_schema = t.ArrayType(\n        [field.dataType.elementType for field in associations.schema if field.name == \"credibleSet\"][0]  # type: ignore\n    )\n    _finemap_udf = f.udf(\n        lambda credible_set, neglog_p: PICS._finemap(credible_set, neglog_p, k),\n        credset_schema,\n    )\n\n    associations.df = (\n        associations.df.withColumn(\"neglog_pvalue\", associations.neglog_pvalue())\n        .withColumn(\n            \"credibleSet\",\n            f.when(\n                f.col(\"credibleSet\").isNotNull(),\n                _finemap_udf(f.col(\"credibleSet\"), f.col(\"neglog_pvalue\")),\n            ),\n        )\n        .drop(\"neglog_pvalue\")\n    )\n    return associations\n
"},{"location":"components/method/window_based_clumping/","title":"Window-based clumping","text":"

Get semi-lead SNPs from summary statistics using a window-based function.

Source code in src/otg/method/window_based_clumping.py
class WindowBasedClumping:\n\"\"\"Get semi-lead snps from summary statistics using a window based function.\"\"\"\n\n    @staticmethod\n    def _identify_cluster_peaks(\n        study: Column, chromosome: Column, position: Column, window_length: int\n    ) -> Column:\n\"\"\"Cluster GWAS significant variants, were clusters are separated by a defined distance.\n\n        !! Important to note that the length of the clusters can be arbitrarily big.\n\n        Args:\n            study (Column): study identifier\n            chromosome (Column): chromosome identifier\n            position (Column): position of the variant\n            window_length (int): window length in basepair\n\n        Returns:\n            Column: containing cluster identifier\n\n        Examples:\n            >>> data = [\n            ...     # Cluster 1:\n            ...     ('s1', 'chr1', 2),\n            ...     ('s1', 'chr1', 4),\n            ...     ('s1', 'chr1', 12),\n            ...     # Cluster 2 - Same chromosome:\n            ...     ('s1', 'chr1', 31),\n            ...     ('s1', 'chr1', 38),\n            ...     ('s1', 'chr1', 42),\n            ...     # Cluster 3 - New chromosome:\n            ...     ('s1', 'chr2', 41),\n            ...     ('s1', 'chr2', 44),\n            ...     ('s1', 'chr2', 50),\n            ...     # Cluster 4 - other study:\n            ...     ('s2', 'chr2', 55),\n            ...     ('s2', 'chr2', 62),\n            ...     ('s2', 'chr2', 70),\n            ... ]\n            >>> window_length = 10\n            >>> (\n            ...     spark.createDataFrame(data, ['studyId', 'chromosome', 'position'])\n            ...     .withColumn(\"cluster_id\",\n            ...         WindowBasedClumping._identify_cluster_peaks(\n            ...             f.col('studyId'),\n            ...             f.col('chromosome'),\n            ...             f.col('position'),\n            ...             window_length\n            ...         )\n            ...     ).show()\n            ... 
)\n            +-------+----------+--------+----------+\n            |studyId|chromosome|position|cluster_id|\n            +-------+----------+--------+----------+\n            |     s1|      chr1|       2| s1_chr1_2|\n            |     s1|      chr1|       4| s1_chr1_2|\n            |     s1|      chr1|      12| s1_chr1_2|\n            |     s1|      chr1|      31|s1_chr1_31|\n            |     s1|      chr1|      38|s1_chr1_31|\n            |     s1|      chr1|      42|s1_chr1_31|\n            |     s1|      chr2|      41|s1_chr2_41|\n            |     s1|      chr2|      44|s1_chr2_41|\n            |     s1|      chr2|      50|s1_chr2_41|\n            |     s2|      chr2|      55|s2_chr2_55|\n            |     s2|      chr2|      62|s2_chr2_55|\n            |     s2|      chr2|      70|s2_chr2_55|\n            +-------+----------+--------+----------+\n            <BLANKLINE>\n\n        \"\"\"\n        # By adding previous position, the cluster boundary can be identified:\n        previous_position = f.lag(position).over(\n            Window.partitionBy(study, chromosome).orderBy(position)\n        )\n        # We consider a cluster boudary if subsequent snps are further than the defined window:\n        cluster_id = f.when(\n            (previous_position.isNull())\n            | (position - previous_position > window_length),\n            f.concat_ws(\"_\", study, chromosome, position),\n        )\n        # The cluster identifier is propagated across every variant of the cluster:\n        return f.when(\n            cluster_id.isNull(),\n            f.last(cluster_id, ignorenulls=True).over(\n                Window.partitionBy(study, chromosome)\n                .orderBy(position)\n                .rowsBetween(Window.unboundedPreceding, Window.currentRow)\n            ),\n        ).otherwise(cluster_id)\n\n    @staticmethod\n    @f.udf(VectorUDT())\n    def _find_peak(position: ndarray, window_size: int) -> DenseVector:\n\"\"\"Establish lead snps based on their positions listed by p-value.\n\n        The function `find_peak` assigns lead SNPs based on their positions listed by p-value within a specified window size.\n\n        Args:\n            position (ndarray): positions of the SNPs sorted by p-value.\n            window_size (int): the distance in bp within which associations are clumped together around the lead snp.\n\n        Returns:\n            DenseVector: binary vector where 1 indicates a lead SNP and 0 indicates a non-lead SNP.\n\n        Examples:\n            >>> from pyspark.ml import functions as fml\n            >>> data = [\n            ...     ('c', 3, 4.0, True),\n            ...     ('c', 4, 2.0, False),\n            ...     ('c', 6, 1.0, True),\n            ...     ('c', 8, 2.5, False),\n            ...     ('c', 9, 3.0, True)\n            ... ]\n            >>> (\n            ...     spark.createDataFrame(data, ['cluster', 'position', 'negLogPValue', 'isSemiIndex'])\n            ...     .withColumn(\n            ...        'collected_positions',\n            ...         f.collect_list(\n            ...             f.col('position'))\n            ...         .over(\n            ...             Window.partitionBy('cluster')\n            ...             .orderBy(f.col('negLogPValue').desc())\n            ...             .rowsBetween(Window.unboundedPreceding, Window.unboundedFollowing)\n            ...         )\n            ...     )\n            ...     
.withColumn('isLeadList', WindowBasedClumping._find_peak(fml.array_to_vector(f.col('collected_positions')), f.lit(2)))\n            ...     .show(truncate=False)\n            ... )\n            +-------+--------+------------+-----------+-------------------+---------------------+\n            |cluster|position|negLogPValue|isSemiIndex|collected_positions|isLeadList           |\n            +-------+--------+------------+-----------+-------------------+---------------------+\n            |c      |3       |4.0         |true       |[3, 9, 8, 4, 6]    |[1.0,1.0,0.0,0.0,1.0]|\n            |c      |9       |3.0         |true       |[3, 9, 8, 4, 6]    |[1.0,1.0,0.0,0.0,1.0]|\n            |c      |8       |2.5         |false      |[3, 9, 8, 4, 6]    |[1.0,1.0,0.0,0.0,1.0]|\n            |c      |4       |2.0         |false      |[3, 9, 8, 4, 6]    |[1.0,1.0,0.0,0.0,1.0]|\n            |c      |6       |1.0         |true       |[3, 9, 8, 4, 6]    |[1.0,1.0,0.0,0.0,1.0]|\n            +-------+--------+------------+-----------+-------------------+---------------------+\n            <BLANKLINE>\n\n        \"\"\"\n        # Initializing the lead list with zeroes:\n        is_lead: ndarray = np.zeros(len(position))\n\n        # List containing indices of leads:\n        lead_indices: list = []\n\n        # Looping through all positions:\n        for index in range(len(position)):\n            # Looping through leads to find out if they are within a window:\n            for lead_index in lead_indices:\n                # If any of the leads within the window:\n                if abs(position[lead_index] - position[index]) < window_size:\n                    # Skipping further checks:\n                    break\n            else:\n                # None of the leads were within the window:\n                lead_indices.append(index)\n                is_lead[index] = 1\n\n        return DenseVector(is_lead)\n\n    @staticmethod\n    def _filter_leads(clump: Column, window_length: int) -> Column:\n\"\"\"Filter lead snps from a column containing clumps with prioritised variants.\n\n        Args:\n            clump (Column): column containing array of structs with all variants in the clump sorted by priority.\n            window_length (int): window length in basepair\n\n        Returns:\n            Column: column containing array of structs with only lead variants.\n\n        Examples:\n            >>> data = [\n            ...     ('v6', 10),\n            ...     ('v4', 6),\n            ...     ('v1', 3),\n            ...     ('v2', 4),\n            ...     ('v3', 5),\n            ...     ('v5', 8),\n            ...     ('v7', 13),\n            ...     ('v8', 20)\n            ... ]\n            >>> window_length = 2\n            >>> (\n            ...    spark.createDataFrame(data, ['variantId', 'position']).withColumn(\"study\", f.lit(\"s1\"))\n            ...    .groupBy(\"study\")\n            ...    .agg(f.collect_list(f.struct(\"*\")).alias(\"clump\"))\n            ...    .select(WindowBasedClumping._filter_leads(f.col('clump'), window_length).alias(\"filtered_clump\"))\n            ...    .show(truncate=False)\n            ... 
)\n            +---------------------------------------------------------------------------------------------------------------+\n            |filtered_clump                                                                                                 |\n            +---------------------------------------------------------------------------------------------------------------+\n            |[{v6, 10, s1, 1.0}, {v4, 6, s1, 1.0}, {v1, 3, s1, 1.0}, {v5, 8, s1, 1.0}, {v7, 13, s1, 1.0}, {v8, 20, s1, 1.0}]|\n            +---------------------------------------------------------------------------------------------------------------+\n            <BLANKLINE>\n\n        \"\"\"\n        # Combine the lead position vector with the aggregated fields and dropping non-lead snps:\n        return f.filter(\n            f.zip_with(\n                clump,\n                # Extract the position vector and identify positions of the leads:\n                fml.vector_to_array(\n                    WindowBasedClumping._find_peak(\n                        fml.array_to_vector(f.transform(clump, lambda x: x.position)),\n                        f.lit(window_length),\n                    )\n                ),\n                lambda x, y: f.when(y == 1.0, x.withField(\"isLead\", y)),\n            ),\n            lambda col: col.isNotNull(),\n        )\n\n    @staticmethod\n    def _collect_clump(mantissa: Column, exponent: Column) -> Column:\n\"\"\"Collect clump into a sorted struct.\n\n        Args:\n            mantissa (Column): mantissa of the p-value\n            exponent (Column): exponent of the p-value\n\n        Returns:\n            Column: struct containing clumped variants sorted by negLogPValue in descending order\n\n        Examples:\n            >>> data = [\n            ...     ('clump_1', 2, 0.1, -1),\n            ...     ('clump_1', 4, 0.2, -1),\n            ...     ('clump_1', 12, 0.3, -1),\n            ...     ('clump_1', 31, 0.4, -1),\n            ...     ('clump_1', 38, 0.5, -1),\n            ...     ('clump_1', 42, 0.6, -1),\n            ...     ('clump_2', 41, 0.7, -1),\n            ...     ('clump_2', 44, 0.8, -1),\n            ...     ('clump_2', 50, 0.9, -1),\n            ...     ('clump_3', 55, 1.0, -1),\n            ...     ('clump_3', 62, 1.1, -1),\n            ...     ('clump_3', 70, 1.2, -1),\n            ... ]\n            >>> (\n            ...    spark.createDataFrame(data, ['clump_id', 'position', 'pValueMantissa', 'pValueExponent'])\n            ...     .groupBy('clump_id')\n            ...     .agg(WindowBasedClumping._collect_clump(\n            ...                 f.col('pValueMantissa'),\n            ...                 f.col('pValueExponent')\n            ...             ).alias(\"clump\")\n            ...     ).show(truncate=False)\n            ... 
)\n            +--------+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+\n            |clump_id|clump                                                                                                                                                                                                                                                  |\n            +--------+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+\n            |clump_1 |[{2.0, clump_1, 2, 0.1, -1}, {1.6989700043360187, clump_1, 4, 0.2, -1}, {1.5228787452803376, clump_1, 12, 0.3, -1}, {1.3979400086720375, clump_1, 31, 0.4, -1}, {1.3010299956639813, clump_1, 38, 0.5, -1}, {1.2218487496163564, clump_1, 42, 0.6, -1}]|\n            |clump_2 |[{1.154901959985743, clump_2, 41, 0.7, -1}, {1.0969100130080565, clump_2, 44, 0.8, -1}, {1.045757490560675, clump_2, 50, 0.9, -1}]                                                                                                                     |\n            |clump_3 |[{1.0, clump_3, 55, 1.0, -1}, {0.958607314841775, clump_3, 62, 1.1, -1}, {0.9208187539523752, clump_3, 70, 1.2, -1}]                                                                                                                                   |\n            +--------+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+\n            <BLANKLINE>\n\n        \"\"\"\n        return f.sort_array(\n            f.collect_list(\n                f.struct(\n                    calculate_neglog_pvalue(mantissa, exponent).alias(\"negLogPValue\"),\n                    \"*\",\n                )\n            ),\n            False,\n        )\n\n    @classmethod\n    def clump(\n        cls: type[WindowBasedClumping],\n        summary_stats: SummaryStatistics,\n        window_length: int,\n    ) -> StudyLocus:\n\"\"\"Clump summary statistics by distance.\n\n        Args:\n            summary_stats (SummaryStatistics): summary statistics to clump\n            window_length (int): window length in basepair\n\n        Returns:\n            StudyLocus: clumped summary statistics\n        \"\"\"\n        return StudyLocus(\n            _df=summary_stats.df.withColumn(\n                \"cluster_id\",\n                # First identify clusters of variants within the window\n                WindowBasedClumping._identify_cluster_peaks(\n                    f.col(\"studyId\"),\n                    f.col(\"chromosome\"),\n                    f.col(\"position\"),\n                    window_length,\n                ),\n            )\n            .groupBy(\"cluster_id\")\n            # Aggregating all data from each cluster:\n            .agg(\n                WindowBasedClumping._collect_clump(\n                    f.col(\"pValueMantissa\"), f.col(\"pValueExponent\")\n                ).alias(\"clump\")\n            )\n            # Explode and identify the index variant representative of the cluster:\n            .withColumn(\n                \"exploded\",\n      
          f.explode(\n                    WindowBasedClumping._filter_leads(f.col(\"clump\"), window_length)\n                ),\n            )\n            .select(\"exploded.*\")\n            # Dropping helper columns:\n            .drop(\"isLead\", \"negLogPValue\", \"cluster_id\")\n            # assign study-locus id:\n            .withColumn(\"studyLocusId\", get_study_locus_id(\"studyId\", \"variantId\"))\n        )\n
"},{"location":"components/method/window_based_clumping/#otg.method.window_based_clumping.WindowBasedClumping.clump","title":"clump(summary_stats, window_length) classmethod","text":"

Clump summary statistics by distance.

Parameters:

summary_stats (SummaryStatistics): Summary statistics to clump. Required.

window_length (int): Window length in base pairs. Required.

Returns:

StudyLocus: Clumped summary statistics.
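
For illustration only, a hedged sketch of distance-based clumping; the 500 kb window and the way summary_stats is obtained are assumptions.

from otg.method.window_based_clumping import WindowBasedClumping

# `summary_stats` is assumed to be a SummaryStatistics instance prepared elsewhere
# (for example by the GWAS Catalog sumstat preprocessing step documented below).
study_locus = WindowBasedClumping.clump(summary_stats, window_length=500_000)

# Each row of the resulting StudyLocus is a semi-lead variant with its studyLocusId assigned.
study_locus.df.select("studyLocusId", "studyId", "variantId", "pValueMantissa", "pValueExponent").show()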

Source code in src/otg/method/window_based_clumping.py
@classmethod\ndef clump(\n    cls: type[WindowBasedClumping],\n    summary_stats: SummaryStatistics,\n    window_length: int,\n) -> StudyLocus:\n\"\"\"Clump summary statistics by distance.\n\n    Args:\n        summary_stats (SummaryStatistics): summary statistics to clump\n        window_length (int): window length in basepair\n\n    Returns:\n        StudyLocus: clumped summary statistics\n    \"\"\"\n    return StudyLocus(\n        _df=summary_stats.df.withColumn(\n            \"cluster_id\",\n            # First identify clusters of variants within the window\n            WindowBasedClumping._identify_cluster_peaks(\n                f.col(\"studyId\"),\n                f.col(\"chromosome\"),\n                f.col(\"position\"),\n                window_length,\n            ),\n        )\n        .groupBy(\"cluster_id\")\n        # Aggregating all data from each cluster:\n        .agg(\n            WindowBasedClumping._collect_clump(\n                f.col(\"pValueMantissa\"), f.col(\"pValueExponent\")\n            ).alias(\"clump\")\n        )\n        # Explode and identify the index variant representative of the cluster:\n        .withColumn(\n            \"exploded\",\n            f.explode(\n                WindowBasedClumping._filter_leads(f.col(\"clump\"), window_length)\n            ),\n        )\n        .select(\"exploded.*\")\n        # Dropping helper columns:\n        .drop(\"isLead\", \"negLogPValue\", \"cluster_id\")\n        # assign study-locus id:\n        .withColumn(\"studyLocusId\", get_study_locus_id(\"studyId\", \"variantId\"))\n    )\n
"},{"location":"components/step/colocalisation/","title":"Colocalisation","text":"

Bases: ColocalisationStepConfig

Colocalisation step.

This workflow runs colocalization analyses that assess the degree to which independent signals of the association share the same causal variant in a region of the genome, typically limited by linkage disequilibrium (LD).

Source code in src/otg/colocalisation.py
@dataclass\nclass ColocalisationStep(ColocalisationStepConfig):\n\"\"\"Colocalisation step.\n\n    This workflow runs colocalization analyses that assess the degree to which independent signals of the association share the same causal variant in a region of the genome, typically limited by linkage disequilibrium (LD).\n    \"\"\"\n\n    session: Session = Session()\n\n    def run(self: ColocalisationStep) -> None:\n\"\"\"Run colocalisation step.\"\"\"\n        # Study-locus information\n        sl = StudyLocus.from_parquet(self.session, self.study_locus_path)\n        si = StudyIndex.from_parquet(self.session, self.study_index_path)\n\n        # Study-locus overlaps for 95% credible sets\n        sl_overlaps = sl.credible_set(CredibleInterval.IS95).overlaps(si)\n\n        coloc_results = Coloc.colocalise(\n            sl_overlaps, self.priorc1, self.priorc2, self.priorc12\n        )\n        ecaviar_results = ECaviar.colocalise(sl_overlaps)\n\n        coloc_results.df.unionByName(ecaviar_results.df, allowMissingColumns=True)\n\n        coloc_results.df.write.mode(self.session.write_mode).parquet(self.coloc_path)\n

Colocalisation step requirements.

Attributes:

study_locus_path (DictConfig): Input Study-locus path.

coloc_path (DictConfig): Output Colocalisation path.

priorc1 (float): Prior on variant being causal for trait 1.

priorc2 (float): Prior on variant being causal for trait 2.

priorc12 (float): Prior on variant being causal for traits 1 and 2.
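
A hedged sketch of instantiating the step directly rather than through the _target_-based configuration; every path below is a placeholder, and the priors are the documented defaults spelled out for clarity.

from otg.colocalisation import ColocalisationStep

ColocalisationStep(
    study_locus_path="gs://my-bucket/study_locus",   # placeholder input
    study_index_path="gs://my-bucket/study_index",   # placeholder input
    coloc_path="gs://my-bucket/colocalisation",      # placeholder output
    priorc1=1e-4,   # prior on a variant being causal for trait 1
    priorc2=1e-4,   # prior on a variant being causal for trait 2
    priorc12=1e-5,  # prior on a variant being causal for both traits
).run()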

Source code in src/otg/config.py
@dataclass\nclass ColocalisationStepConfig:\n\"\"\"Colocalisation step requirements.\n\n    Attributes:\n        study_locus_path (DictConfig): Input Study-locus path.\n        coloc_path (DictConfig): Output Colocalisation path.\n        priorc1 (float): Prior on variant being causal for trait 1.\n        priorc2 (float): Prior on variant being causal for trait 2.\n        priorc12 (float): Prior on variant being causal for traits 1 and 2.\n    \"\"\"\n\n    _target_: str = \"otg.colocalisation.ColocalisationStep\"\n    study_locus_path: str = MISSING\n    study_index_path: str = MISSING\n    coloc_path: str = MISSING\n    priorc1: float = 1e-4\n    priorc2: float = 1e-4\n    priorc12: float = 1e-5\n
"},{"location":"components/step/colocalisation/#otg.colocalisation.ColocalisationStep.run","title":"run()","text":"

Run colocalisation step.

Source code in src/otg/colocalisation.py
def run(self: ColocalisationStep) -> None:\n\"\"\"Run colocalisation step.\"\"\"\n    # Study-locus information\n    sl = StudyLocus.from_parquet(self.session, self.study_locus_path)\n    si = StudyIndex.from_parquet(self.session, self.study_index_path)\n\n    # Study-locus overlaps for 95% credible sets\n    sl_overlaps = sl.credible_set(CredibleInterval.IS95).overlaps(si)\n\n    coloc_results = Coloc.colocalise(\n        sl_overlaps, self.priorc1, self.priorc2, self.priorc12\n    )\n    ecaviar_results = ECaviar.colocalise(sl_overlaps)\n\n    coloc_results.df.unionByName(ecaviar_results.df, allowMissingColumns=True)\n\n    coloc_results.df.write.mode(self.session.write_mode).parquet(self.coloc_path)\n
"},{"location":"components/step/finngen/","title":"FinnGen","text":"

Bases: FinnGenStepConfig

FinnGen study table ingestion step.

Source code in src/otg/finngen.py
@dataclass\nclass FinnGenStep(FinnGenStepConfig):\n\"\"\"FinnGen study table ingestion step.\"\"\"\n\n    session: Session = Session()\n\n    def run(self: FinnGenStep) -> None:\n\"\"\"Run FinnGen study table ingestion step.\"\"\"\n        # Read the JSON data from the URL.\n        json_data = urlopen(self.finngen_phenotype_table_url).read().decode(\"utf-8\")\n        rdd = self.session.spark.sparkContext.parallelize([json_data])\n        df = self.session.spark.read.json(rdd)\n\n        # Parse the study index data.\n        finngen_studies = StudyIndexFinnGen.from_source(\n            df,\n            self.finngen_release_prefix,\n            self.finngen_sumstat_url_prefix,\n            self.finngen_sumstat_url_suffix,\n        )\n\n        # Write the output.\n        finngen_studies.df.write.mode(self.session.write_mode).parquet(\n            self.finngen_study_index_out\n        )\n

FinnGen study table ingestion step requirements.

Attributes:

finngen_phenotype_table_url (str): FinnGen API for fetching the list of studies.

finngen_release_prefix (str): Release prefix pattern.

finngen_sumstat_url_prefix (str): URL prefix for summary statistics location.

finngen_sumstat_url_suffix (str): URL suffix for summary statistics location.

finngen_study_index_out (str): Output path for the FinnGen study index dataset.
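
A hedged sketch with placeholder values; the real FinnGen URL, release prefix and suffix live in the pipeline configuration and are not reproduced here.

from otg.finngen import FinnGenStep

FinnGenStep(
    finngen_phenotype_table_url="https://example.org/finngen/phenotypes.json",  # placeholder API endpoint
    finngen_release_prefix="FINNGEN_RX_",                                        # placeholder release prefix
    finngen_sumstat_url_prefix="https://example.org/finngen/sumstats/",          # placeholder URL prefix
    finngen_sumstat_url_suffix=".gz",                                            # placeholder URL suffix
    finngen_study_index_out="gs://my-bucket/finngen_study_index",                # placeholder output path
).run()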

Source code in src/otg/config.py
@dataclass\nclass FinnGenStepConfig:\n\"\"\"FinnGen study table ingestion step requirements.\n\n    Attributes:\n        finngen_phenotype_table_url (str): FinnGen API for fetching the list of studies.\n        finngen_release_prefix (str): Release prefix pattern.\n        finngen_sumstat_url_prefix (str): URL prefix for summary statistics location.\n        finngen_sumstat_url_suffix (str): URL prefix suffix for summary statistics location.\n        finngen_study_index_out (str): Output path for the FinnGen study index dataset.\n    \"\"\"\n\n    _target_: str = \"otg.finngen.FinnGenStep\"\n    finngen_phenotype_table_url: str = MISSING\n    finngen_release_prefix: str = MISSING\n    finngen_sumstat_url_prefix: str = MISSING\n    finngen_sumstat_url_suffix: str = MISSING\n    finngen_study_index_out: str = MISSING\n
"},{"location":"components/step/finngen/#otg.finngen.FinnGenStep.run","title":"run()","text":"

Run FinnGen study table ingestion step.

Source code in src/otg/finngen.py
def run(self: FinnGenStep) -> None:\n\"\"\"Run FinnGen study table ingestion step.\"\"\"\n    # Read the JSON data from the URL.\n    json_data = urlopen(self.finngen_phenotype_table_url).read().decode(\"utf-8\")\n    rdd = self.session.spark.sparkContext.parallelize([json_data])\n    df = self.session.spark.read.json(rdd)\n\n    # Parse the study index data.\n    finngen_studies = StudyIndexFinnGen.from_source(\n        df,\n        self.finngen_release_prefix,\n        self.finngen_sumstat_url_prefix,\n        self.finngen_sumstat_url_suffix,\n    )\n\n    # Write the output.\n    finngen_studies.df.write.mode(self.session.write_mode).parquet(\n        self.finngen_study_index_out\n    )\n
"},{"location":"components/step/gene_index/","title":"Gene index","text":"

Bases: GeneIndexStepConfig

Gene index step.

This step generates a gene index dataset from an Open Targets Platform target dataset.

Source code in src/otg/gene_index.py
@dataclass\nclass GeneIndexStep(GeneIndexStepConfig):\n\"\"\"Gene index step.\n\n    This step generates a gene index dataset from an Open Targets Platform target dataset.\n    \"\"\"\n\n    session: Session = Session()\n\n    def run(self: GeneIndexStep) -> None:\n\"\"\"Run Target index step.\"\"\"\n        # Extract\n        platform_target = self.session.spark.read.parquet(self.target_path)\n        # Transform\n        gene_index = GeneIndex.from_source(platform_target)\n        # Load\n        gene_index.df.write.mode(self.session.write_mode).parquet(self.gene_index_path)\n

Gene index step requirements.

Attributes:

target_path (str): Open Targets Platform target dataset path.

gene_index_path (str): Output gene index path.
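
A hedged sketch of wiring the step's single input and output; both paths are placeholders.

from otg.gene_index import GeneIndexStep

GeneIndexStep(
    target_path="gs://my-bucket/platform/targets",  # placeholder Open Targets Platform target dataset
    gene_index_path="gs://my-bucket/gene_index",    # placeholder output path
).run()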

Source code in src/otg/config.py
@dataclass\nclass GeneIndexStepConfig:\n\"\"\"Gene index step requirements.\n\n    Attributes:\n        target_path (str): Open targets Platform target dataset path.\n        gene_index_path (str): Output gene index path.\n    \"\"\"\n\n    _target_: str = \"otg.gene_index.GeneIndexStep\"\n    target_path: str = MISSING\n    gene_index_path: str = MISSING\n
"},{"location":"components/step/gene_index/#otg.gene_index.GeneIndexStep.run","title":"run()","text":"

Run Target index step.

Source code in src/otg/gene_index.py
def run(self: GeneIndexStep) -> None:\n\"\"\"Run Target index step.\"\"\"\n    # Extract\n    platform_target = self.session.spark.read.parquet(self.target_path)\n    # Transform\n    gene_index = GeneIndex.from_source(platform_target)\n    # Load\n    gene_index.df.write.mode(self.session.write_mode).parquet(self.gene_index_path)\n
"},{"location":"components/step/gwas_catalog/","title":"GWAS Catalog","text":"

Bases: GWASCatalogStepConfig

GWAS Catalog step.

Source code in src/otg/gwas_catalog.py
@dataclass\nclass GWASCatalogStep(GWASCatalogStepConfig):\n\"\"\"GWAS Catalog step.\"\"\"\n\n    session: Session = Session()\n\n    def run(self: GWASCatalogStep) -> None:\n\"\"\"Run GWAS Catalog ingestion step to extract GWASCatalog Study and StudyLocus tables.\"\"\"\n        hl.init(sc=self.session.spark.sparkContext, log=\"/dev/null\")\n        # All inputs:\n        # Variant annotation dataset\n        va = VariantAnnotation.from_parquet(self.session, self.variant_annotation_path)\n        # GWAS Catalog raw study information\n        catalog_studies = self.session.spark.read.csv(\n            self.catalog_studies_file, sep=\"\\t\", header=True\n        )\n        # GWAS Catalog ancestry information\n        ancestry_lut = self.session.spark.read.csv(\n            self.catalog_ancestry_file, sep=\"\\t\", header=True\n        )\n        # GWAS Catalog summary statistics information\n        sumstats_lut = self.session.spark.read.csv(\n            self.catalog_sumstats_lut, sep=\"\\t\", header=False\n        )\n        # GWAS Catalog raw association information\n        catalog_associations = self.session.spark.read.csv(\n            self.catalog_associations_file, sep=\"\\t\", header=True\n        )\n\n        # Transform:\n        # GWAS Catalog study index and study-locus splitted\n        study_index, study_locus = GWASCatalogSplitter.split(\n            StudyIndexGWASCatalog.from_source(\n                catalog_studies, ancestry_lut, sumstats_lut\n            ),\n            StudyLocusGWASCatalog.from_source(catalog_associations, va),\n        )\n\n        # Annotate LD information\n        study_locus = study_locus.annotate_ld(\n            self.session,\n            study_index,\n            self.ld_populations,\n            self.ld_index_template,\n            self.ld_matrix_template,\n            self.min_r2,\n        )\n\n        # Fine-mapping LD-clumped study-locus using PICS\n        finemapped_study_locus = (\n            PICS.finemap(study_locus).annotate_credible_sets().clump()\n        )\n\n        # Write:\n        study_index.df.write.mode(self.session.write_mode).parquet(\n            self.catalog_studies_out\n        )\n        finemapped_study_locus.df.write.mode(self.session.write_mode).parquet(\n            self.catalog_associations_out\n        )\n

GWAS Catalog step requirements.

Attributes:

catalog_studies_file (str): Raw GWAS Catalog studies file.

catalog_ancestry_file (str): Ancestry annotations file from GWAS Catalog.

catalog_sumstats_lut (str): GWAS Catalog summary statistics lookup table.

catalog_associations_file (str): Raw GWAS Catalog associations file.

variant_annotation_path (str): Input variant annotation path.

ld_populations (list): List of populations to include.

min_r2 (float): Minimum r2 threshold for variants considered within a window.

catalog_studies_out (str): Output GWAS Catalog studies path.

catalog_associations_out (str): Output GWAS Catalog associations path.
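
A hedged sketch showing how the step's required paths fit together; every value is a placeholder, and ld_populations keeps its documented default.

from otg.gwas_catalog import GWASCatalogStep

GWASCatalogStep(
    catalog_studies_file="gs://my-bucket/gwas_catalog/studies.tsv",            # placeholder
    catalog_ancestry_file="gs://my-bucket/gwas_catalog/ancestries.tsv",        # placeholder
    catalog_sumstats_lut="gs://my-bucket/gwas_catalog/harmonised_list.txt",    # placeholder
    catalog_associations_file="gs://my-bucket/gwas_catalog/associations.tsv",  # placeholder
    variant_annotation_path="gs://my-bucket/variant_annotation",               # placeholder
    ld_matrix_template="gs://my-bucket/ld_matrix/{POP}.bm",                    # placeholder template
    ld_index_template="gs://my-bucket/ld_index/{POP}",                         # placeholder template
    min_r2=0.5,                                                                # documented default
    catalog_studies_out="gs://my-bucket/catalog/studies",                      # placeholder output
    catalog_associations_out="gs://my-bucket/catalog/associations",            # placeholder output
).run()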

Source code in src/otg/config.py
@dataclass\nclass GWASCatalogStepConfig:\n\"\"\"GWAS Catalog step requirements.\n\n    Attributes:\n        catalog_studies_file (str): Raw GWAS catalog studies file.\n        catalog_ancestry_file (str): Ancestry annotations file from GWAS Catalog.\n        catalog_sumstats_lut (str): GWAS Catalog summary statistics lookup table.\n        catalog_associations_file (str): Raw GWAS catalog associations file.\n        variant_annotation_path (str): Input variant annotation path.\n        ld_populations (list): List of populations to include.\n        min_r2 (float): Minimum r2 to consider when considering variants within a window.\n        catalog_studies_out (str): Output GWAS catalog studies path.\n        catalog_associations_out (str): Output GWAS catalog associations path.\n    \"\"\"\n\n    _target_: str = \"otg.gwas_catalog.GWASCatalogStep\"\n    catalog_studies_file: str = MISSING\n    catalog_ancestry_file: str = MISSING\n    catalog_sumstats_lut: str = MISSING\n    catalog_associations_file: str = MISSING\n    variant_annotation_path: str = MISSING\n    min_r2: float = 0.5\n    ld_matrix_template: str = MISSING\n    ld_index_template: str = MISSING\n    ld_populations: List[str] = field(\n        default_factory=lambda: [\n            \"afr\",  # African-American\n            \"amr\",  # American Admixed/Latino\n            \"asj\",  # Ashkenazi Jewish\n            \"eas\",  # East Asian\n            \"fin\",  # Finnish\n            \"nfe\",  # Non-Finnish European\n            \"nwe\",  # Northwestern European\n            \"seu\",  # Southeastern European\n        ]\n    )\n    catalog_studies_out: str = MISSING\n    catalog_associations_out: str = MISSING\n
"},{"location":"components/step/gwas_catalog/#otg.gwas_catalog.GWASCatalogStep.run","title":"run()","text":"

Run GWAS Catalog ingestion step to extract GWASCatalog Study and StudyLocus tables.

Source code in src/otg/gwas_catalog.py
def run(self: GWASCatalogStep) -> None:\n\"\"\"Run GWAS Catalog ingestion step to extract GWASCatalog Study and StudyLocus tables.\"\"\"\n    hl.init(sc=self.session.spark.sparkContext, log=\"/dev/null\")\n    # All inputs:\n    # Variant annotation dataset\n    va = VariantAnnotation.from_parquet(self.session, self.variant_annotation_path)\n    # GWAS Catalog raw study information\n    catalog_studies = self.session.spark.read.csv(\n        self.catalog_studies_file, sep=\"\\t\", header=True\n    )\n    # GWAS Catalog ancestry information\n    ancestry_lut = self.session.spark.read.csv(\n        self.catalog_ancestry_file, sep=\"\\t\", header=True\n    )\n    # GWAS Catalog summary statistics information\n    sumstats_lut = self.session.spark.read.csv(\n        self.catalog_sumstats_lut, sep=\"\\t\", header=False\n    )\n    # GWAS Catalog raw association information\n    catalog_associations = self.session.spark.read.csv(\n        self.catalog_associations_file, sep=\"\\t\", header=True\n    )\n\n    # Transform:\n    # GWAS Catalog study index and study-locus splitted\n    study_index, study_locus = GWASCatalogSplitter.split(\n        StudyIndexGWASCatalog.from_source(\n            catalog_studies, ancestry_lut, sumstats_lut\n        ),\n        StudyLocusGWASCatalog.from_source(catalog_associations, va),\n    )\n\n    # Annotate LD information\n    study_locus = study_locus.annotate_ld(\n        self.session,\n        study_index,\n        self.ld_populations,\n        self.ld_index_template,\n        self.ld_matrix_template,\n        self.min_r2,\n    )\n\n    # Fine-mapping LD-clumped study-locus using PICS\n    finemapped_study_locus = (\n        PICS.finemap(study_locus).annotate_credible_sets().clump()\n    )\n\n    # Write:\n    study_index.df.write.mode(self.session.write_mode).parquet(\n        self.catalog_studies_out\n    )\n    finemapped_study_locus.df.write.mode(self.session.write_mode).parquet(\n        self.catalog_associations_out\n    )\n
"},{"location":"components/step/gwas_catalog_sumstat_preprocess/","title":"GWAS Catalog sumstat preprocess","text":"

Bases: GWASCatalogSumstatsPreprocessConfig

Step to preprocess GWAS Catalog harmonised summary stats.

Source code in src/otg/gwas_catalog_sumstat_preprocess.py
@dataclass\nclass GWASCatalogSumstatsPreprocessStep(GWASCatalogSumstatsPreprocessConfig):\n\"\"\"Step to preprocess GWAS Catalog harmonised summary stats.\"\"\"\n\n    session: Session = Session()\n\n    def run(self: GWASCatalogSumstatsPreprocessStep) -> None:\n\"\"\"Run Step.\"\"\"\n        # Extract\n        self.session.logger.info(self.raw_sumstats_path)\n        self.session.logger.info(self.out_sumstats_path)\n        self.session.logger.info(self.study_id)\n\n        # Reading dataset:\n        raw_dataset = self.session.spark.read.csv(\n            self.raw_sumstats_path, header=True, sep=\"\\t\"\n        )\n        self.session.logger.info(\n            f\"Number of single point associations: {raw_dataset.count()}\"\n        )\n\n        # Processing dataset:\n        SummaryStatistics.from_gwas_harmonized_summary_stats(\n            raw_dataset, self.study_id\n        ).df.write.mode(self.session.write_mode).parquet(self.out_sumstats_path)\n        self.session.logger.info(\"Processing dataset successfully completed.\")\n

GWAS Catalog Sumstats Preprocessing step requirements.

Attributes:

raw_sumstats_path (str): Input raw GWAS Catalog summary statistics path.

out_sumstats_path (str): Output GWAS Catalog summary statistics path.

study_id (str): GWAS Catalog study identifier.
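
A hedged sketch with a hypothetical study accession; both paths are placeholders.

from otg.gwas_catalog_sumstat_preprocess import GWASCatalogSumstatsPreprocessStep

GWASCatalogSumstatsPreprocessStep(
    raw_sumstats_path="gs://my-bucket/raw_sumstats/GCST000001.h.tsv.gz",  # placeholder harmonised file
    out_sumstats_path="gs://my-bucket/summary_statistics/GCST000001",     # placeholder output path
    study_id="GCST000001",                                                # placeholder study identifier
).run()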

Source code in src/otg/config.py
@dataclass\nclass GWASCatalogSumstatsPreprocessConfig:\n\"\"\"GWAS Catalog Sumstats Preprocessing step requirements.\n\n    Attributes:\n        raw_sumstats_path (str): Input raw GWAS Catalog summary statistics path.\n        out_sumstats_path (str): Output GWAS Catalog summary statistics path.\n        study_id (str): GWAS Catalog study identifier.\n    \"\"\"\n\n    _target_: str = (\n        \"otg.gwas_catalog_sumstat_preprocess.GWASCatalogSumstatsPreprocessStep\"\n    )\n    raw_sumstats_path: str = MISSING\n    out_sumstats_path: str = MISSING\n    study_id: str = MISSING\n
"},{"location":"components/step/gwas_catalog_sumstat_preprocess/#otg.gwas_catalog_sumstat_preprocess.GWASCatalogSumstatsPreprocessStep.run","title":"run()","text":"

Run Step.

Source code in src/otg/gwas_catalog_sumstat_preprocess.py
def run(self: GWASCatalogSumstatsPreprocessStep) -> None:\n\"\"\"Run Step.\"\"\"\n    # Extract\n    self.session.logger.info(self.raw_sumstats_path)\n    self.session.logger.info(self.out_sumstats_path)\n    self.session.logger.info(self.study_id)\n\n    # Reading dataset:\n    raw_dataset = self.session.spark.read.csv(\n        self.raw_sumstats_path, header=True, sep=\"\\t\"\n    )\n    self.session.logger.info(\n        f\"Number of single point associations: {raw_dataset.count()}\"\n    )\n\n    # Processing dataset:\n    SummaryStatistics.from_gwas_harmonized_summary_stats(\n        raw_dataset, self.study_id\n    ).df.write.mode(self.session.write_mode).parquet(self.out_sumstats_path)\n    self.session.logger.info(\"Processing dataset successfully completed.\")\n
"},{"location":"components/step/ld_index/","title":"LD index","text":"

Bases: LDIndexStepConfig

LD index step.

Source code in src/otg/ld_index.py
@dataclass\nclass LDIndexStep(LDIndexStepConfig):\n\"\"\"LD index step.\"\"\"\n\n    session: Session = Session()\n\n    def run(self: LDIndexStep) -> None:\n\"\"\"Run LD index step.\"\"\"\n        # init hail session\n        hl.init(sc=self.session.spark.sparkContext, log=\"/dev/null\")\n\n        for population in self.ld_populations:\n            self.session.logger.info(f\"Processing population: {population}\")\n            ld_index = LDIndex.create(\n                self.ld_index_raw_template.format(POP=population),\n                self.ld_radius,\n                self.grch37_to_grch38_chain_path,\n            )\n\n            self.session.logger.info(\n                f\"Writing ls index to: {self.ld_index_template.format(POP=population)}\"\n            )\n            (\n                ld_index.df.write.partitionBy(\"chromosome\")\n                .mode(self.session.write_mode)\n                .parquet(self.ld_index_template.format(POP=population))  # noqa: FS002\n            )\n

LD index step requirements.

Attributes:

pop_ldindex_path (str): Input population LD index file from gnomAD.

ld_radius (int): Window radius around locus.

grch37_to_grch38_chain_path (str): Path to GRCh37 to GRCh38 chain file.

ld_index_path (str): Output LD index path.
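
A hedged sketch; the chain file and the output template are placeholders, while ld_index_raw_template, ld_radius and ld_populations keep their documented defaults.

from otg.ld_index import LDIndexStep

LDIndexStep(
    grch37_to_grch38_chain_path="gs://my-bucket/grch37_to_grch38.over.chain.gz",  # placeholder chain file
    ld_index_template="gs://my-bucket/ld_index/{POP}",                            # placeholder output template
).run()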

Source code in src/otg/config.py
@dataclass\nclass LDIndexStepConfig:\n\"\"\"LD index step requirements.\n\n    Attributes:\n        pop_ldindex_path (str): Input population LD index file from gnomAD.\n        ld_radius (int): Window radius around locus.\n        grch37_to_grch38_chain_path (str): Path to GRCh37 to GRCh38 chain file.\n        ld_index_path (str): Output LD index path.\n    \"\"\"\n\n    _target_: str = \"otg.ld_index.LDIndexStep\"\n    ld_index_raw_template: str = \"gs://gcp-public-data--gnomad/release/2.1.1/ld/gnomad.genomes.r2.1.1.{POP}.common.ld.variant_indices.ht\"\n    ld_radius: int = 500_000\n    grch37_to_grch38_chain_path: str = MISSING\n    ld_index_template: str = MISSING\n    ld_populations: List[str] = field(\n        default_factory=lambda: [\n            \"afr\",  # African-American\n            \"amr\",  # American Admixed/Latino\n            \"asj\",  # Ashkenazi Jewish\n            \"eas\",  # East Asian\n            \"fin\",  # Finnish\n            \"nfe\",  # Non-Finnish European\n            \"nwe\",  # Northwestern European\n            \"seu\",  # Southeastern European\n        ]\n    )\n
"},{"location":"components/step/ld_index/#otg.ld_index.LDIndexStep.run","title":"run()","text":"

Run LD index step.

Source code in src/otg/ld_index.py
def run(self: LDIndexStep) -> None:\n\"\"\"Run LD index step.\"\"\"\n    # init hail session\n    hl.init(sc=self.session.spark.sparkContext, log=\"/dev/null\")\n\n    for population in self.ld_populations:\n        self.session.logger.info(f\"Processing population: {population}\")\n        ld_index = LDIndex.create(\n            self.ld_index_raw_template.format(POP=population),\n            self.ld_radius,\n            self.grch37_to_grch38_chain_path,\n        )\n\n        self.session.logger.info(\n            f\"Writing ls index to: {self.ld_index_template.format(POP=population)}\"\n        )\n        (\n            ld_index.df.write.partitionBy(\"chromosome\")\n            .mode(self.session.write_mode)\n            .parquet(self.ld_index_template.format(POP=population))  # noqa: FS002\n        )\n
"},{"location":"components/step/ukbiobank/","title":"UKBiobank","text":"

Bases: UKBiobankStepConfig

UKBiobank study table ingestion step.

Source code in src/otg/ukbiobank.py
@dataclass\nclass UKBiobankStep(UKBiobankStepConfig):\n\"\"\"UKBiobank study table ingestion step.\"\"\"\n\n    session: Session = Session()\n\n    def run(self: UKBiobankStep) -> None:\n\"\"\"Run UKBiobank study table ingestion step.\"\"\"\n        # Read in the UKBiobank manifest tsv file.\n        df = self.session.spark.read.csv(\n            self.ukbiobank_manifest, sep=\"\\t\", header=True, inferSchema=True\n        )\n\n        # Parse the study index data.\n        ukbiobank_study_index = StudyIndexUKBiobank.from_source(df)\n\n        # Write the output.\n        ukbiobank_study_index.df.write.mode(self.session.write_mode).parquet(\n            self.ukbiobank_study_index_out\n        )\n

UKBiobank study table ingestion step requirements.

Attributes:

ukbiobank_manifest (str): UKBiobank manifest of studies.

ukbiobank_study_index_out (str): Output path for the UKBiobank study index dataset.

Source code in src/otg/config.py
@dataclass\nclass UKBiobankStepConfig:\n\"\"\"UKBiobank study table ingestion step requirements.\n\n    Attributes:\n        ukbiobank_manifest (str): UKBiobank manifest of studies.\n        ukbiobank_study_index_out (str): Output path for the UKBiobank study index dataset.\n    \"\"\"\n\n    _target_: str = \"otg.ukbiobank.UKBiobankStep\"\n    ukbiobank_manifest: str = MISSING\n    ukbiobank_study_index_out: str = MISSING\n
"},{"location":"components/step/ukbiobank/#otg.ukbiobank.UKBiobankStep.run","title":"run()","text":"

Run UKBiobank study table ingestion step.

Source code in src/otg/ukbiobank.py
def run(self: UKBiobankStep) -> None:\n\"\"\"Run UKBiobank study table ingestion step.\"\"\"\n    # Read in the UKBiobank manifest tsv file.\n    df = self.session.spark.read.csv(\n        self.ukbiobank_manifest, sep=\"\\t\", header=True, inferSchema=True\n    )\n\n    # Parse the study index data.\n    ukbiobank_study_index = StudyIndexUKBiobank.from_source(df)\n\n    # Write the output.\n    ukbiobank_study_index.df.write.mode(self.session.write_mode).parquet(\n        self.ukbiobank_study_index_out\n    )\n
"},{"location":"components/step/variant_annotation_step/","title":"Variant annotation","text":"

Bases: VariantAnnotationStepConfig

Variant annotation step.

The variant annotation step produces a dataset of the type VariantAnnotation, derived from gnomAD's gnomad.genomes.vX.X.X.sites.ht Hail table. This dataset is used to validate variants and as a source of annotation.

Source code in src/otg/variant_annotation.py
@dataclass\nclass VariantAnnotationStep(VariantAnnotationStepConfig):\n\"\"\"Variant annotation step.\n\n    Variant annotation step produces a dataset of the type `VariantAnnotation` derived from gnomADs `gnomad.genomes.vX.X.X.sites.ht` Hail's table. This dataset is used to validate variants and as a source of annotation.\n    \"\"\"\n\n    session: Session = Session()\n\n    def run(self: VariantAnnotationStep) -> None:\n\"\"\"Run variant annotation step.\"\"\"\n        # init hail session\n        hl.init(sc=self.session.spark.sparkContext, log=\"/dev/null\")\n\n\"\"\"Run variant annotation step.\"\"\"\n        variant_annotation = VariantAnnotation.from_gnomad(\n            self.gnomad_genomes,\n            self.chain_38_to_37,\n            self.populations,\n        )\n        # Writing data partitioned by chromosome and position:\n        (\n            variant_annotation.df.repartition(400, \"chromosome\")\n            .sortWithinPartitions(\"chromosome\", \"position\")\n            .write.partitionBy(\"chromosome\")\n            .mode(self.session.write_mode)\n            .parquet(self.variant_annotation_path)\n        )\n

Variant annotation step requirements.

Attributes:

gnomad_genomes (str): Path to gnomAD genomes Hail table.

chain_38_to_37 (str): Path to GRCh38 to GRCh37 chain file.

variant_annotation_path (str): Output variant annotation path.

populations (List[str]): List of populations to include.

Source code in src/otg/config.py
@dataclass\nclass VariantAnnotationStepConfig:\n\"\"\"Variant annotation step requirements.\n\n    Attributes:\n        gnomad_genomes (str): Path to gnomAD genomes hail table.\n        chain_38_to_37 (str): Path to GRCh38 to GRCh37 chain file.\n        variant_annotation_path (str): Output variant annotation path.\n        populations (List[str]): List of populations to include.\n    \"\"\"\n\n    _target_: str = \"otg.variant_annotation.VariantAnnotationStep\"\n    gnomad_genomes: str = MISSING\n    chain_38_to_37: str = MISSING\n    variant_annotation_path: str = MISSING\n    populations: List[str] = field(\n        default_factory=lambda: [\n            \"afr\",  # African-American\n            \"amr\",  # American Admixed/Latino\n            \"ami\",  # Amish ancestry\n            \"asj\",  # Ashkenazi Jewish\n            \"eas\",  # East Asian\n            \"fin\",  # Finnish\n            \"nfe\",  # Non-Finnish European\n            \"mid\",  # Middle Eastern\n            \"sas\",  # South Asian\n            \"oth\",  # Other\n        ]\n    )\n
"},{"location":"components/step/variant_annotation_step/#otg.variant_annotation.VariantAnnotationStep.run","title":"run()","text":"

Run variant annotation step.

Source code in src/otg/variant_annotation.py
def run(self: VariantAnnotationStep) -> None:\n\"\"\"Run variant annotation step.\"\"\"\n    # init hail session\n    hl.init(sc=self.session.spark.sparkContext, log=\"/dev/null\")\n\n\"\"\"Run variant annotation step.\"\"\"\n    variant_annotation = VariantAnnotation.from_gnomad(\n        self.gnomad_genomes,\n        self.chain_38_to_37,\n        self.populations,\n    )\n    # Writing data partitioned by chromosome and position:\n    (\n        variant_annotation.df.repartition(400, \"chromosome\")\n        .sortWithinPartitions(\"chromosome\", \"position\")\n        .write.partitionBy(\"chromosome\")\n        .mode(self.session.write_mode)\n        .parquet(self.variant_annotation_path)\n    )\n
"},{"location":"components/step/variant_index_step/","title":"Variant index","text":"

Bases: VariantIndexStepConfig

Variant index step.

Using a VariantAnnotation dataset as a reference, this step creates and writes a dataset of the type VariantIndex that includes only variants with disease-association data, carrying a reduced set of annotations.

Source code in src/otg/variant_index.py
@dataclass\nclass VariantIndexStep(VariantIndexStepConfig):\n\"\"\"Variant index step.\n\n    Using a `VariantAnnotation` dataset as a reference, this step creates and writes a dataset of the type `VariantIndex` that includes only variants that have disease-association data with a reduced set of annotations.\n    \"\"\"\n\n    session: Session = Session()\n\n    def run(self: VariantIndexStep) -> None:\n\"\"\"Run variant index step.\"\"\"\n        # Variant annotation dataset\n        va = VariantAnnotation.from_parquet(self.session, self.variant_annotation_path)\n\n        # Study-locus dataset\n        study_locus = StudyLocus.from_parquet(self.session, self.study_locus_path)\n\n        # Reduce scope of variant annotation dataset to only variants in study-locus sets:\n        va_slimmed = va.filter_by_variant_df(\n            study_locus.unique_lead_tag_variants(), [\"id\", \"chromosome\"]\n        )\n\n        # Generate variant index ussing a subset of the variant annotation dataset\n        vi = VariantIndex.from_variant_annotation(va_slimmed)\n\n        # Write data:\n        # self.etl.logger.info(\n        #     f\"Writing invalid variants from the credible set to: {self.variant_invalid}\"\n        # )\n        # vi.invalid_variants.write.mode(self.etl.write_mode).parquet(\n        #     self.variant_invalid\n        # )\n\n        self.session.logger.info(f\"Writing variant index to: {self.variant_index_path}\")\n        (\n            vi.df.write.partitionBy(\"chromosome\")\n            .mode(self.session.write_mode)\n            .parquet(self.variant_index_path)\n        )\n

Variant index step requirements.

Attributes:

variant_annotation_path (str): Input variant annotation path.

study_locus_path (str): Input study-locus path.

variant_index_path (str): Output variant index path.
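
A hedged sketch of wiring the step's inputs and output; all paths are placeholders.

from otg.variant_index import VariantIndexStep

VariantIndexStep(
    variant_annotation_path="gs://my-bucket/variant_annotation",  # placeholder input
    study_locus_path="gs://my-bucket/study_locus",                # placeholder input
    variant_index_path="gs://my-bucket/variant_index",            # placeholder output
).run()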

Source code in src/otg/config.py
@dataclass\nclass VariantIndexStepConfig:\n\"\"\"Variant index step requirements.\n\n    Attributes:\n        variant_annotation_path (str): Input variant annotation path.\n        study_locus_path (str): Input study-locus path.\n        variant_index_path (str): Output variant index path.\n    \"\"\"\n\n    _target_: str = \"otg.variant_index.VariantIndexStep\"\n    variant_annotation_path: str = MISSING\n    study_locus_path: str = MISSING\n    variant_index_path: str = MISSING\n
"},{"location":"components/step/variant_index_step/#otg.variant_index.VariantIndexStep.run","title":"run()","text":"

Run variant index step.

Source code in src/otg/variant_index.py
def run(self: VariantIndexStep) -> None:\n\"\"\"Run variant index step.\"\"\"\n    # Variant annotation dataset\n    va = VariantAnnotation.from_parquet(self.session, self.variant_annotation_path)\n\n    # Study-locus dataset\n    study_locus = StudyLocus.from_parquet(self.session, self.study_locus_path)\n\n    # Reduce scope of variant annotation dataset to only variants in study-locus sets:\n    va_slimmed = va.filter_by_variant_df(\n        study_locus.unique_lead_tag_variants(), [\"id\", \"chromosome\"]\n    )\n\n    # Generate variant index ussing a subset of the variant annotation dataset\n    vi = VariantIndex.from_variant_annotation(va_slimmed)\n\n    # Write data:\n    # self.etl.logger.info(\n    #     f\"Writing invalid variants from the credible set to: {self.variant_invalid}\"\n    # )\n    # vi.invalid_variants.write.mode(self.etl.write_mode).parquet(\n    #     self.variant_invalid\n    # )\n\n    self.session.logger.info(f\"Writing variant index to: {self.variant_index_path}\")\n    (\n        vi.df.write.partitionBy(\"chromosome\")\n        .mode(self.session.write_mode)\n        .parquet(self.variant_index_path)\n    )\n
"},{"location":"components/step/variant_to_gene_step/","title":"V2G","text":"

Bases: V2GStepConfig

Variant-to-gene (V2G) step.

This step aims to generate a dataset that contains multiple pieces of evidence supporting the functional association of specific variants with genes. Some of the evidence types include:

  1. Chromatin interaction experiments, e.g. Promoter Capture Hi-C (PCHi-C).
  2. In silico functional predictions, e.g. Variant Effect Predictor (VEP) from Ensembl.
  3. Distance between the variant and each gene's canonical transcription start site (TSS).
Source code in src/otg/v2g.py
@dataclass
class V2GStep(V2GStepConfig):
    """Variant-to-gene (V2G) step.

    This step aims to generate a dataset that contains multiple pieces of evidence supporting the functional association of specific variants with genes. Some of the evidence types include:

    1. Chromatin interaction experiments, e.g. Promoter Capture Hi-C (PCHi-C).
    2. In silico functional predictions, e.g. Variant Effect Predictor (VEP) from Ensembl.
    3. Distance between the variant and each gene's canonical transcription start site (TSS).

    """

    session: Session = Session()

    def run(self: V2GStep) -> None:
        """Run V2G dataset generation."""
        # Filter gene index by approved biotypes to define V2G gene universe
        gene_index_filtered = GeneIndex.from_parquet(
            self.session, self.gene_index_path
        ).filter_by_biotypes(self.approved_biotypes)

        vi = VariantIndex.from_parquet(self.session, self.variant_index_path).persist()
        va = VariantAnnotation.from_parquet(self.session, self.variant_annotation_path)
        vep_consequences = self.session.spark.read.csv(
            self.vep_consequences_path, sep="\t", header=True
        )

        # Variant annotation reduced to the variant index to define V2G variant universe
        va_slimmed = va.filter_by_variant_df(vi.df, ["id", "chromosome"]).persist()

        # lift over variants to hg38
        lift = LiftOverSpark(
            self.liftover_chain_file_path, self.liftover_max_length_difference
        )

        v2g_datasets = [
            va_slimmed.get_distance_to_tss(gene_index_filtered, self.max_distance),
            # variant effects
            va_slimmed.get_most_severe_vep_v2g(vep_consequences, gene_index_filtered),
            va_slimmed.get_polyphen_v2g(gene_index_filtered),
            va_slimmed.get_sift_v2g(gene_index_filtered),
            va_slimmed.get_plof_v2g(gene_index_filtered),
            # intervals
            Intervals.parse_andersson(
                self.session, self.anderson_path, gene_index_filtered, lift
            ).v2g(vi),
            Intervals.parse_javierre(
                self.session, self.javierre_path, gene_index_filtered, lift
            ).v2g(vi),
            Intervals.parse_jung(
                self.session, self.jung_path, gene_index_filtered, lift
            ).v2g(vi),
            Intervals.parse_thurman(
                self.session, self.thurnman_path, gene_index_filtered, lift
            ).v2g(vi),
        ]

        # merge all V2G datasets
        v2g = V2G(
            _df=reduce(
                lambda x, y: x.unionByName(y, allowMissingColumns=True),
                [dataset.df for dataset in v2g_datasets],
            ).repartition("chromosome")
        )
        # write V2G dataset
        (
            v2g.df.write.partitionBy("chromosome")
            .mode(self.session.write_mode)
            .parquet(self.v2g_path)
        )
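The merge at the end of `run` relies on `unionByName(allowMissingColumns=True)`, which aligns columns by name and null-fills any column absent from one side. A minimal standalone sketch of that pattern, with toy data and assumed column names:

from functools import reduce
from pyspark.sql import SparkSession

spark = SparkSession.builder.getOrCreate()

# Two toy evidence DataFrames with partially overlapping columns
distance = spark.createDataFrame(
    [("1_100_A_T", "ENSG0001", 1500)], ["variantId", "geneId", "distance"]
)
vep = spark.createDataFrame(
    [("1_100_A_T", "ENSG0001", "missense_variant")],
    ["variantId", "geneId", "mostSevereConsequence"],
)

# Union by column name; columns missing from one side come back as nulls
merged = reduce(
    lambda x, y: x.unionByName(y, allowMissingColumns=True), [distance, vep]
)
merged.show()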

Variant to gene (V2G) step requirements.

Attributes:

    variant_index_path (str): Input variant index path.
    variant_annotation_path (str): Input variant annotation path.
    gene_index_path (str): Input gene index path.
    vep_consequences_path (str): Input VEP consequences path.
    liftover_chain_file_path (str): Path to GRCh37 to GRCh38 chain file.
    approved_biotypes (list[str]): List of approved biotypes.
    anderson_path (str): Anderson intervals path.
    javierre_path (str): Javierre intervals path.
    jung_path (str): Jung intervals path.
    thurnman_path (str): Thurnman intervals path.
    liftover_max_length_difference (int): Maximum length difference for liftover.
    max_distance (int): Maximum distance to consider.
    v2g_path (str): Output V2G path.

Source code in src/otg/config.py
@dataclass
class V2GStepConfig:
    """Variant to gene (V2G) step requirements.

    Attributes:
        variant_index_path (str): Input variant index path.
        variant_annotation_path (str): Input variant annotation path.
        gene_index_path (str): Input gene index path.
        vep_consequences_path (str): Input VEP consequences path.
        liftover_chain_file_path (str): Path to GRCh37 to GRCh38 chain file.
        approved_biotypes (list[str]): List of approved biotypes.
        anderson_path (str): Anderson intervals path.
        javierre_path (str): Javierre intervals path.
        jung_path (str): Jung intervals path.
        thurnman_path (str): Thurnman intervals path.
        liftover_max_length_difference (int): Maximum length difference for liftover.
        max_distance (int): Maximum distance to consider.
        v2g_path (str): Output V2G path.
    """

    _target_: str = "otg.v2g.V2GStep"
    variant_index_path: str = MISSING
    variant_annotation_path: str = MISSING
    gene_index_path: str = MISSING
    vep_consequences_path: str = MISSING
    liftover_chain_file_path: str = MISSING
    anderson_path: str = MISSING
    javierre_path: str = MISSING
    jung_path: str = MISSING
    thurnman_path: str = MISSING
    liftover_max_length_difference: int = 100
    max_distance: int = 500_000
    v2g_path: str = MISSING
    approved_biotypes: List[str] = field(
        default_factory=lambda: [
            "protein_coding",
            "3prime_overlapping_ncRNA",
            "antisense",
            "bidirectional_promoter_lncRNA",
            "IG_C_gene",
            "IG_D_gene",
            "IG_J_gene",
            "IG_V_gene",
            "lincRNA",
            "macro_lncRNA",
            "non_coding",
            "sense_intronic",
            "sense_overlapping",
        ]
    )
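`GeneIndex.filter_by_biotypes` itself is not shown here; conceptually, restricting the gene universe to `approved_biotypes` amounts to an `isin` filter on a biotype column, as in this toy sketch (column names assumed):

from pyspark.sql import SparkSession
from pyspark.sql import functions as f

spark = SparkSession.builder.getOrCreate()

# Toy gene table with an assumed "biotype" column
genes = spark.createDataFrame(
    [("ENSG0001", "protein_coding"), ("ENSG0002", "processed_pseudogene")],
    ["geneId", "biotype"],
)
approved_biotypes = ["protein_coding", "lincRNA", "IG_C_gene"]

# Keep only genes whose biotype is on the approved list
genes.filter(f.col("biotype").isin(approved_biotypes)).show()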
"},{"location":"components/step/variant_to_gene_step/#otg.v2g.V2GStep.run","title":"run()","text":"

Run V2G dataset generation.

Source code in src/otg/v2g.py
def run(self: V2GStep) -> None:
    """Run V2G dataset generation."""
    # Filter gene index by approved biotypes to define V2G gene universe
    gene_index_filtered = GeneIndex.from_parquet(
        self.session, self.gene_index_path
    ).filter_by_biotypes(self.approved_biotypes)

    vi = VariantIndex.from_parquet(self.session, self.variant_index_path).persist()
    va = VariantAnnotation.from_parquet(self.session, self.variant_annotation_path)
    vep_consequences = self.session.spark.read.csv(
        self.vep_consequences_path, sep="\t", header=True
    )

    # Variant annotation reduced to the variant index to define V2G variant universe
    va_slimmed = va.filter_by_variant_df(vi.df, ["id", "chromosome"]).persist()

    # lift over variants to hg38
    lift = LiftOverSpark(
        self.liftover_chain_file_path, self.liftover_max_length_difference
    )

    v2g_datasets = [
        va_slimmed.get_distance_to_tss(gene_index_filtered, self.max_distance),
        # variant effects
        va_slimmed.get_most_severe_vep_v2g(vep_consequences, gene_index_filtered),
        va_slimmed.get_polyphen_v2g(gene_index_filtered),
        va_slimmed.get_sift_v2g(gene_index_filtered),
        va_slimmed.get_plof_v2g(gene_index_filtered),
        # intervals
        Intervals.parse_andersson(
            self.session, self.anderson_path, gene_index_filtered, lift
        ).v2g(vi),
        Intervals.parse_javierre(
            self.session, self.javierre_path, gene_index_filtered, lift
        ).v2g(vi),
        Intervals.parse_jung(
            self.session, self.jung_path, gene_index_filtered, lift
        ).v2g(vi),
        Intervals.parse_thurman(
            self.session, self.thurnman_path, gene_index_filtered, lift
        ).v2g(vi),
    ]

    # merge all V2G datasets
    v2g = V2G(
        _df=reduce(
            lambda x, y: x.unionByName(y, allowMissingColumns=True),
            [dataset.df for dataset in v2g_datasets],
        ).repartition("chromosome")
    )
    # write V2G dataset
    (
        v2g.df.write.partitionBy("chromosome")
        .mode(self.session.write_mode)
        .parquet(self.v2g_path)
    )
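Both steps finish with a chromosome-partitioned parquet write, which keeps per-chromosome reads cheap downstream. A small standalone sketch of that write; the output path and mode are placeholders, whereas the pipeline takes the mode from `session.write_mode`:

from pyspark.sql import SparkSession

spark = SparkSession.builder.getOrCreate()

# Toy V2G-like DataFrame with a chromosome column to partition on
v2g = spark.createDataFrame(
    [("1_100_A_T", "ENSG0001", "1"), ("2_200_G_C", "ENSG0002", "2")],
    ["variantId", "geneId", "chromosome"],
)

(
    v2g.write.partitionBy("chromosome")
    .mode("overwrite")   # placeholder write mode
    .parquet("/tmp/v2g")  # placeholder output path
)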
"}]} \ No newline at end of file diff --git a/sitemap.xml.gz b/sitemap.xml.gz index 1057ad0e254034ced35219bdedc010b28884abd3..e9db2912a8479be443d6a1407abdeac9e660d9d4 100644 GIT binary patch delta 14 Vcmcc3c$<+;zMF$1G-@K-RRAI!1e5>( delta 14 Vcmcc3c$<+;zMF$%mF`5gs{kVr1iSzM