diff --git a/pmrapp/src/ac.rs b/pmrapp/src/ac.rs
index 035ac4f..92c549a 100644
--- a/pmrapp/src/ac.rs
+++ b/pmrapp/src/ac.rs
@@ -6,12 +6,8 @@ use leptos_router::{
 };
 use pmrcore::ac::{
     agent::Agent,
-    genpolicy::Policy,
     user::User,
-    workflow::{
-        State,
-        state::Transition,
-    },
+    workflow::state::Transition,
 };
 
 use crate::{
@@ -25,7 +21,6 @@ use api::{
     SignOut,
     WorkflowTransition,
     current_user,
-    get_resource_policy_state,
 };
 
 #[derive(Clone)]
@@ -33,13 +28,6 @@ pub struct AccountCtx {
     pub current_user: ArcResource<Result<Option<User>, ServerFnError>>,
     pub set_ps: ArcWriteSignal<PolicyState>,
     pub res_ps: ArcResource<PolicyState>,
-    // While the set_ps/res_ps does provide the PolicyState data, there
-    // needs to be a way to refresh this as the state may change due to
-    // user action (i.e. workflow state transitions); this provides the
-    // means to refresh that.
-    res_policy_state: ArcResource<()>,
-    // This is used to signal an update to the above from the action.
-    sig_policy_state: ArcRwSignal<PolicyState>,
 }
 
 pub fn provide_session_context() {
@@ -49,44 +37,18 @@ pub fn provide_session_context() {
             current_user().await
         },
     );
-    let sig_policy_state = ArcRwSignal::new(PolicyState::default());
     let (ps, set_ps) = arc_signal(PolicyState::default());
-    let policy_state_update = sig_policy_state.clone();
     let ps_read = ps.clone();
     let res_ps = ArcResource::new_blocking(
         move || ps_read.get(),
-        move |ps| {
-            let policy_state_update = policy_state_update.clone();
-            async move {
-                policy_state_update.set(ps.clone());
-                ps
-            }
-        },
-    );
-
-    let state_read = sig_policy_state.clone();
-    let res_policy_state_update = set_ps.clone();
-    let res_policy_state = ArcResource::new_blocking(
-        move || (ps.get(), state_read.get()),
-        move |(policy_state, sig_policy_state)| {
-            let res_policy_state_update = res_policy_state_update.clone();
-            async move {
-                if sig_policy_state.policy.is_some() {
-                    Effect::new(move |_| {
-                        res_policy_state_update.set(sig_policy_state.clone());
-                    });
-                }
-            }
-        },
+        move |ps| async move { ps },
     );
 
     provide_context(AccountCtx {
         current_user,
         set_ps,
         res_ps,
-        res_policy_state,
-        sig_policy_state,
     });
 }
 
@@ -108,9 +70,16 @@ pub fn WorkflowState() -> impl IntoView {
             let res_ps = account_ctx.res_ps.clone();
             // leptos::logging::log!("{res_ps:?}");
             Suspend::new(async move {
-                action.version().get();
-                let res_ps = res_ps.await;
-                let res_ps_check = res_ps.clone();
+                // TODO figure out where/how to deal with the error here
+                let res_ps = action.value()
+                    .get()
+                    // we are just dropping the error here; ideally we should
+                    // check and render an error tooltip under the workflow
+                    // state if there was a problem
+                    .transpose()
+                    .ok()
+                    .flatten()
+                    .unwrap_or(res_ps.await);
                 let workflow_state = res_ps.state;
                 if let Some(policy) = res_ps.policy {
                     (policy.agent != Agent::Anonymous).then(|| Some(view! {
@@ -120,29 +89,6 @@ pub fn WorkflowState() -> impl IntoView {
                         >
                             {workflow_state.to_string()}
-                        {move || {
-                            match action.value().get() {
-                                Some(Ok(policy_state)) => {
-                                    leptos::logging::log!("policy_state.state = {:?}", policy_state);
-                                    leptos::logging::log!("got state={}", policy_state.state);
-
-                                    let ctx = expect_context::<AccountCtx>();
-                                    ctx.sig_policy_state.set(policy_state);
-                                    // upon hydration the signal is empty? Also this must be
-                                    // set _after_ the previous one, but this does cause a weird
-                                    // double fetch issue when that applies.
-                                    ctx.set_ps.set(res_ps_check.clone());
-
-                                    // To ensure that we don't loop, otherwise this arm will be
-                                    // triggered once more when this whole suspense is re-rendered;
-                                    // safe to do as the value has been handled.
-                                    action.value().set(None);
-                                }
-                                // TODO have this set an error somewhere?
-                                // Some(Err(ServerFnError::WrappedServerError(e))) => e.to_string(),
-                                _ => ()
-                            }
-                        }}
                         {
                             TRANSITIONS.transitions_for(workflow_state, policy.to_roles())
diff --git a/pmrapp/src/ac/api.rs b/pmrapp/src/ac/api.rs
index 8ed29e3..dc9cad2 100644
--- a/pmrapp/src/ac/api.rs
+++ b/pmrapp/src/ac/api.rs
@@ -2,11 +2,7 @@ use leptos::{
     prelude::ServerFnError,
     server,
 };
-use pmrcore::ac::{
-    genpolicy::Policy as GenPolicy,
-    user::User,
-    workflow::State,
-};
+use pmrcore::ac::user::User;
 use std::{
     convert::Infallible,
     fmt,
@@ -28,6 +24,7 @@ mod ssr {
         Platform,
     };
     use pmrcore::ac::agent::Agent;
+    pub use pmrcore::ac::workflow::State;
     pub use crate::{
         server::platform,
         workflow::state::TRANSITIONS,
@@ -165,26 +162,6 @@ pub(crate) async fn current_user() -> Result<Option<User>, ServerFnError> {
     Ok(session.user.map(|auth| auth.user().clone_inner()))
 }
 
-#[server]
-pub(crate) async fn get_resource_policy_state(
-    resource: String,
-) -> Result<PolicyState, ServerFnError> {
-    let platform = platform().await?;
-    let state = platform
-        .ac_platform
-        .get_wf_state_for_res(&resource)
-        .await?;
-    let policy = if let Some(user) = current_user().await? {
-        Some(platform
-            .ac_platform
-            .generate_policy_for_agent_res(&user.into(), resource)
-            .await?)
-    } else {
-        None
-    };
-    Ok(PolicyState::new(policy, state))
-}
-
 #[server]
 pub(crate) async fn workflow_transition(
     resource: String,
diff --git a/pmrapp/src/workspace.rs b/pmrapp/src/workspace.rs
index cc8efbe..68c7689 100644
--- a/pmrapp/src/workspace.rs
+++ b/pmrapp/src/workspace.rs
@@ -92,7 +92,6 @@ fn workspace_root_page_ctx(current_owner: String) {
     // this should be a push? children elements will need to also use this and that's a
     // conflict.
     logging::log!("setup workspace_root_page_ctx");
-    let resource = "/workspace/".to_string();
     let cleanup_owner = current_owner.clone();
 
     on_cleanup(move || {
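
Review note on the ac.rs change: the removed sig_policy_state/res_policy_state plumbing existed only to push a completed transition's PolicyState back into AccountCtx, and the removed match arm then had to clear action.value() to break the resulting re-render loop. The new Suspend body sidesteps all of that by treating the action value as an override of the resource. Below is a minimal standalone sketch of the .transpose().ok().flatten().unwrap_or(..) chain it relies on; PolicyState here is a hypothetical stand-in struct, the error type is simplified to String (the real types come from pmrcore and leptos), and resolve() is an illustrative helper rather than anything in the codebase.

    // Option<Result<T, E>> is the shape action.value().get() hands back:
    // None      -> no transition has run yet
    // Some(Ok)  -> a transition finished and produced a fresh PolicyState
    // Some(Err) -> a transition failed
    #[derive(Debug, PartialEq)]
    struct PolicyState(&'static str);

    fn resolve(
        action_value: Option<Result<PolicyState, String>>,
        fallback: PolicyState,
    ) -> PolicyState {
        action_value
            .transpose()     // Option<Result<T, E>> -> Result<Option<T>, E>
            .ok()            // drop any error: -> Option<Option<T>>
            .flatten()       // -> Option<T>
            .unwrap_or(fallback)
    }

    fn main() {
        let from_resource = || PolicyState("from resource");
        // No transition yet: fall through to the resource value.
        assert_eq!(resolve(None, from_resource()), from_resource());
        // Successful transition: the fresh action value wins.
        assert_eq!(
            resolve(Some(Ok(PolicyState("from action"))), from_resource()),
            PolicyState("from action"),
        );
        // Failed transition: the error is silently discarded (the TODO
        // left in the diff) and the resource value is used instead.
        assert_eq!(
            resolve(Some(Err("denied".to_string())), from_resource()),
            from_resource(),
        );
    }

In the component itself the fallback is res_ps.await, so a failed or absent action falls back to the cached resource value; the remaining gap, as the TODO notes, is that transition errors currently vanish without any user-visible feedback.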