| 
 | 1 | +#[cfg(test)]  | 
 | 2 | +mod tests;  | 
 | 3 | + | 
 | 4 | +use std::collections::BTreeMap;  | 
 | 5 | + | 
 | 6 | +use serde_yaml::Value;  | 
 | 7 | + | 
 | 8 | +use crate::GitHubContext;  | 
 | 9 | + | 
/// Representation of a job loaded from the `src/ci/github-actions/jobs.yml` file.
#[derive(serde::Deserialize, Debug, Clone)]
pub struct Job {
    /// Name of the job, e.g. mingw-check
    pub name: String,
    /// GitHub runner on which the job should be executed
    pub os: String,
    /// Environment variables of this specific job. They are applied on top of
    /// the shared per-run-type environment (see `JobEnvironments`) and take
    /// precedence over it.
    pub env: BTreeMap<String, Value>,
    /// Should the job be only executed on a specific channel?
    #[serde(default)]
    pub only_on_channel: Option<String>,
    /// Do not cancel the whole workflow if this job fails.
    #[serde(default)]
    pub continue_on_error: Option<bool>,
    /// Free additional disk space in the job, by removing unused packages.
    #[serde(default)]
    pub free_disk: Option<bool>,
}
 | 28 | + | 
 | 29 | +impl Job {  | 
 | 30 | +    /// By default, the Docker image of a job is based on its name.  | 
 | 31 | +    /// However, it can be overridden by its IMAGE environment variable.  | 
 | 32 | +    pub fn image(&self) -> String {  | 
 | 33 | +        self.env  | 
 | 34 | +            .get("IMAGE")  | 
 | 35 | +            .map(|v| v.as_str().expect("IMAGE value should be a string").to_string())  | 
 | 36 | +            .unwrap_or_else(|| self.name.clone())  | 
 | 37 | +    }  | 
 | 38 | + | 
 | 39 | +    fn is_linux(&self) -> bool {  | 
 | 40 | +        self.os.contains("ubuntu")  | 
 | 41 | +    }  | 
 | 42 | +}  | 
 | 43 | + | 
/// Shared environment variables for each run type, applied to every job of
/// that run type before the job's own `env` (the job's variables win on
/// conflicting keys).
#[derive(serde::Deserialize, Debug)]
struct JobEnvironments {
    /// Environment shared by all `pr` jobs.
    #[serde(rename = "pr")]
    pr_env: BTreeMap<String, Value>,
    /// Environment shared by all `try` jobs.
    #[serde(rename = "try")]
    try_env: BTreeMap<String, Value>,
    /// Environment shared by all `auto` jobs.
    #[serde(rename = "auto")]
    auto_env: BTreeMap<String, Value>,
}
 | 53 | + | 
/// The full set of CI jobs loaded from the jobs YAML file, grouped by the
/// run type under which they execute.
#[derive(serde::Deserialize, Debug)]
pub struct JobDatabase {
    /// Jobs executed for pull request workflows.
    #[serde(rename = "pr")]
    pub pr_jobs: Vec<Job>,
    /// Jobs executed by default for `@bors try` runs (when no explicit job
    /// patterns are requested).
    #[serde(rename = "try")]
    pub try_jobs: Vec<Job>,
    /// Jobs executed for merge attempt (auto) workflows.
    #[serde(rename = "auto")]
    pub auto_jobs: Vec<Job>,

    /// Shared environments for the individual run types.
    envs: JobEnvironments,
}
 | 66 | + | 
 | 67 | +impl JobDatabase {  | 
 | 68 | +    /// Find `auto` jobs that correspond to the passed `pattern`.  | 
 | 69 | +    /// Patterns are matched using the glob syntax.  | 
 | 70 | +    /// For example `dist-*` matches all jobs starting with `dist-`.  | 
 | 71 | +    fn find_auto_jobs_by_pattern(&self, pattern: &str) -> Vec<Job> {  | 
 | 72 | +        self.auto_jobs  | 
 | 73 | +            .iter()  | 
 | 74 | +            .filter(|j| glob_match::glob_match(pattern, &j.name))  | 
 | 75 | +            .cloned()  | 
 | 76 | +            .collect()  | 
 | 77 | +    }  | 
 | 78 | +}  | 
 | 79 | + | 
 | 80 | +pub fn load_job_db(db: &str) -> anyhow::Result<JobDatabase> {  | 
 | 81 | +    let mut db: Value = serde_yaml::from_str(&db)?;  | 
 | 82 | + | 
 | 83 | +    // We need to expand merge keys (<<), because serde_yaml can't deal with them  | 
 | 84 | +    // `apply_merge` only applies the merge once, so do it a few times to unwrap nested merges.  | 
 | 85 | +    db.apply_merge()?;  | 
 | 86 | +    db.apply_merge()?;  | 
 | 87 | + | 
 | 88 | +    let db: JobDatabase = serde_yaml::from_value(db)?;  | 
 | 89 | +    Ok(db)  | 
 | 90 | +}  | 
 | 91 | + | 
/// Representation of a job outputted to a GitHub Actions workflow.
#[derive(serde::Serialize, Debug)]
struct GithubActionsJob {
    /// The main identifier of the job, used by CI scripts to determine what should be executed.
    name: String,
    /// Helper label displayed in GitHub Actions interface, containing the job name and a run type
    /// prefix (PR/try/auto).
    full_name: String,
    /// GitHub runner on which the job should be executed.
    os: String,
    /// Merged environment: shared per-run-type variables overlaid with the
    /// job's own variables.
    env: BTreeMap<String, serde_json::Value>,
    /// Do not cancel the whole workflow if this job fails.
    /// Omitted from the serialized output when unset.
    #[serde(skip_serializing_if = "Option::is_none")]
    continue_on_error: Option<bool>,
    /// Free additional disk space in the job, by removing unused packages.
    /// Omitted from the serialized output when unset.
    #[serde(skip_serializing_if = "Option::is_none")]
    free_disk: Option<bool>,
}
 | 107 | + | 
 | 108 | +/// Skip CI jobs that are not supposed to be executed on the given `channel`.  | 
 | 109 | +fn skip_jobs(jobs: Vec<Job>, channel: &str) -> Vec<Job> {  | 
 | 110 | +    jobs.into_iter()  | 
 | 111 | +        .filter(|job| {  | 
 | 112 | +            job.only_on_channel.is_none() || job.only_on_channel.as_deref() == Some(channel)  | 
 | 113 | +        })  | 
 | 114 | +        .collect()  | 
 | 115 | +}  | 
 | 116 | + | 
/// Type of workflow that is being executed on CI
#[derive(Debug)]
pub enum RunType {
    /// Workflows that run after a push to a PR branch
    PullRequest,
    /// Try run started with @bors try.
    /// `job_patterns` optionally restricts the run to the `auto` jobs whose
    /// names match the given glob patterns; when `None`, the default set of
    /// try jobs is used.
    TryJob { job_patterns: Option<Vec<String>> },
    /// Merge attempt workflow
    AutoJob,
}
 | 127 | + | 
/// Maximum number of custom try jobs that can be requested in a single
/// `@bors try` request. Enforced in `calculate_jobs` after pattern expansion.
const MAX_TRY_JOBS_COUNT: usize = 20;
 | 131 | + | 
/// Compute the list of GitHub Actions jobs to execute for the given
/// `run_type` on the given `channel`, merging the shared per-run-type
/// environment into each job's own environment.
///
/// Returns an error if a try-job pattern matches no `auto` job, or if the
/// patterns expand to more than `MAX_TRY_JOBS_COUNT` jobs.
fn calculate_jobs(
    run_type: &RunType,
    db: &JobDatabase,
    channel: &str,
) -> anyhow::Result<Vec<GithubActionsJob>> {
    // Select the base job list, the display-name prefix, and the shared
    // environment for this run type.
    let (jobs, prefix, base_env) = match run_type {
        RunType::PullRequest => (db.pr_jobs.clone(), "PR", &db.envs.pr_env),
        RunType::TryJob { job_patterns } => {
            let jobs = if let Some(patterns) = job_patterns {
                let mut jobs: Vec<Job> = vec![];
                let mut unknown_patterns = vec![];
                for pattern in patterns {
                    let matched_jobs = db.find_auto_jobs_by_pattern(pattern);
                    if matched_jobs.is_empty() {
                        unknown_patterns.push(pattern.clone());
                    } else {
                        for job in matched_jobs {
                            // Deduplicate by name: several patterns may match
                            // the same auto job.
                            if !jobs.iter().any(|j| j.name == job.name) {
                                jobs.push(job);
                            }
                        }
                    }
                }
                // Every requested pattern must match at least one auto job.
                if !unknown_patterns.is_empty() {
                    return Err(anyhow::anyhow!(
                        "Patterns `{}` did not match any auto jobs",
                        unknown_patterns.join(", ")
                    ));
                }
                // Cap how many jobs one try request can spawn.
                if jobs.len() > MAX_TRY_JOBS_COUNT {
                    return Err(anyhow::anyhow!(
                        "It is only possible to schedule up to {MAX_TRY_JOBS_COUNT} custom jobs, received {} custom jobs expanded from {} pattern(s)",
                        jobs.len(),
                        patterns.len()
                    ));
                }
                jobs
            } else {
                // No explicit patterns: run the default try jobs.
                db.try_jobs.clone()
            };
            (jobs, "try", &db.envs.try_env)
        }
        RunType::AutoJob => (db.auto_jobs.clone(), "auto", &db.envs.auto_env),
    };
    // Drop jobs restricted to a different channel.
    let jobs = skip_jobs(jobs, channel);
    let jobs = jobs
        .into_iter()
        .map(|job| {
            // Start from the shared environment; job-specific variables
            // override shared ones on conflicting keys.
            let mut env: BTreeMap<String, serde_json::Value> = crate::yaml_map_to_json(base_env);
            env.extend(crate::yaml_map_to_json(&job.env));
            let full_name = format!("{prefix} - {}", job.name);

            GithubActionsJob {
                name: job.name,
                full_name,
                os: job.os,
                env,
                continue_on_error: job.continue_on_error,
                free_disk: job.free_disk,
            }
        })
        .collect();

    Ok(jobs)
}
 | 197 | + | 
/// Determine the run type from the GitHub context, compute the job matrix for
/// it and print it to stdout as `jobs=<json>` and `run_type=<type>` lines,
/// which are consumed by the GitHub Actions workflow.
///
/// Returns an error if the run type cannot be determined or the computed job
/// list is empty.
pub fn calculate_job_matrix(
    db: JobDatabase,
    gh_ctx: GitHubContext,
    channel: &str,
) -> anyhow::Result<()> {
    let run_type = gh_ctx.get_run_type().ok_or_else(|| {
        anyhow::anyhow!("Cannot determine the type of workflow that is being executed")
    })?;
    eprintln!("Run type: {run_type:?}");

    let jobs = calculate_jobs(&run_type, &db, channel)?;
    // An empty matrix would make CI silently do nothing, so treat it as an error.
    if jobs.is_empty() {
        return Err(anyhow::anyhow!("Computed job list is empty"));
    }

    // Map the run type to the short string identifier emitted in the output.
    let run_type = match run_type {
        RunType::PullRequest => "pr",
        RunType::TryJob { .. } => "try",
        RunType::AutoJob => "auto",
    };

    // Human-readable logging goes to stderr; stdout carries only the
    // machine-readable key=value output.
    eprintln!("Output");
    eprintln!("jobs={jobs:?}");
    eprintln!("run_type={run_type}");
    println!("jobs={}", serde_json::to_string(&jobs)?);
    println!("run_type={run_type}");

    Ok(())
}
 | 227 | + | 
 | 228 | +pub fn find_linux_job<'a>(jobs: &'a [Job], name: &str) -> anyhow::Result<&'a Job> {  | 
 | 229 | +    let Some(job) = jobs.iter().find(|j| j.name == name) else {  | 
 | 230 | +        let available_jobs: Vec<&Job> = jobs.iter().filter(|j| j.is_linux()).collect();  | 
 | 231 | +        let mut available_jobs =  | 
 | 232 | +            available_jobs.iter().map(|j| j.name.to_string()).collect::<Vec<_>>();  | 
 | 233 | +        available_jobs.sort();  | 
 | 234 | +        return Err(anyhow::anyhow!(  | 
 | 235 | +            "Job {name} not found. The following jobs are available:\n{}",  | 
 | 236 | +            available_jobs.join(", ")  | 
 | 237 | +        ));  | 
 | 238 | +    };  | 
 | 239 | +    if !job.is_linux() {  | 
 | 240 | +        return Err(anyhow::anyhow!("Only Linux jobs can be executed locally"));  | 
 | 241 | +    }  | 
 | 242 | + | 
 | 243 | +    Ok(job)  | 
 | 244 | +}  | 
0 commit comments